author    Jason Gross <jgross@mit.edu>  2017-11-02 01:48:38 -0400
committer Jason Gross <jgross@mit.edu>  2017-11-02 01:50:36 -0400
commit    4786b83d53c76d802c7dced2e05977f945e026f4 (patch)
tree      677a784dfc339fb643bad83423d1e098b4b98425 /src/Specific/Framework
parent    d720ad8946733fce359aebe71d9edc617927c759 (diff)
Better generation of autogenerated C files
Also move bench framework to src/Specific/Framework/bench/
Diffstat (limited to 'src/Specific/Framework')
-rw-r--r--  src/Specific/Framework/bench/gmpsec.c       | 208
-rw-r--r--  src/Specific/Framework/bench/gmpvar.c       | 207
-rw-r--r--  src/Specific/Framework/bench/gmpxx.cpp      | 129
-rw-r--r--  src/Specific/Framework/bench/montladder.py  |  57
-rwxr-xr-x  src/Specific/Framework/bench/prettyprint.py |  77
5 files changed, 678 insertions, 0 deletions
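
These benchmark programs are standalone; each can be built directly against GMP. The commands below are a sketch added for convenience, not part of this commit, assuming a Unix toolchain with libgmp and its C++ bindings installed (gmpxx.cpp needs C++11 for the _mpz literal):

    cc  -O2 gmpsec.c -lgmp -o gmpsec    # constant-time GMP ladder
    cc  -O2 gmpvar.c -lgmp -o gmpvar    # variable-time GMP ladder
    c++ -std=c++11 -O2 gmpxx.cpp -lgmpxx -lgmp -o gmpxx
    python3 montladder.py               # Curve448 reference ladder

Each C/C++ binary exits 0 when its 200-iteration scalar-multiplication chain matches the stored test vector, and a nonzero byte index otherwise.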
diff --git a/src/Specific/Framework/bench/gmpsec.c b/src/Specific/Framework/bench/gmpsec.c
new file mode 100644
index 000000000..aa949952a
--- /dev/null
+++ b/src/Specific/Framework/bench/gmpsec.c
@@ -0,0 +1,208 @@
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <gmp.h>
+
+// modulus, encoded as big-endian bytes
+static const unsigned char modulus[] = {0x7f,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xed};
+static const unsigned char a_minus_two_over_four[] = {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xdb,0x41};
+#define modulus_bytes (sizeof(modulus))
+#define modulus_limbs ((8*sizeof(modulus) + GMP_LIMB_BITS-1)/GMP_LIMB_BITS)
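+// reading aid (facts about the curve, not part of the original sources): the
+// bytes above encode the Curve25519 prime and ladder constant,
+//   modulus               = 2^255 - 19
+//   a_minus_two_over_four = (A-2)/4 = (486662-2)/4 = 121665 = 0x01db41
+// so with 64-bit limbs, modulus_bytes is 32 and modulus_limbs is 4.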
+
+
+static void fe_print(mp_limb_t* fe) {
+ printf("0x");
+ for (size_t i = modulus_limbs-1; i > 0; --i) { printf("%016lx", fe[i]); }
+ printf("%016lx", fe[0]);
+}
+
+static void crypto_scalarmult(uint8_t *out, const uint8_t *secret, size_t secretbits, const uint8_t *point) {
+ // curve constants
+ mp_limb_t m[modulus_limbs+1];
+ mp_limb_t a24[modulus_limbs+1];
+  assert(mpn_set_str(m, modulus, modulus_bytes, 256) == (mp_size_t)modulus_limbs);
+  assert(mpn_set_str(a24, a_minus_two_over_four, sizeof(a_minus_two_over_four), 256) <= (mp_size_t)modulus_limbs);
+  // n.b. the asserts above have side effects (they perform the conversions),
+  // so this file must not be built with -DNDEBUG.
+
+  // allocate scratch space for internal use by GMP.
+  // as the GMP *_itch routines are documented as functions, not macros, we use
+  // a variable-length stack allocation; hopefully the compiler will inline the
+  // _itch functions and determine the correct stack frame size statically
+  // through constant propagation.
+ mp_size_t mulscratch_sz = mpn_sec_mul_itch(modulus_limbs, modulus_limbs);
+ mp_size_t sqrscratch_sz = mpn_sec_sqr_itch(modulus_limbs);
+ mp_size_t modscratch_sz = mpn_sec_div_r_itch(modulus_limbs+modulus_limbs, modulus_limbs);
+ mp_size_t invscratch_sz = mpn_sec_invert_itch(modulus_limbs);
+ mp_size_t scratch_sz = mulscratch_sz;
+ scratch_sz = (sqrscratch_sz > scratch_sz) ? sqrscratch_sz : scratch_sz;
+ scratch_sz = (modscratch_sz > scratch_sz) ? modscratch_sz : scratch_sz;
+ scratch_sz = (invscratch_sz > scratch_sz) ? invscratch_sz : scratch_sz;
+ mp_limb_t scratch[scratch_sz];
+ for (size_t i = 0; i<scratch_sz; ++i) { scratch[i] = 0; }
+
+ // allocate scratch space for use by the field operation macros.
+ mp_limb_t _product_tmp[modulus_limbs+modulus_limbs];
+
+ #define fe_mul(out, x, y) do { \
+ mpn_sec_mul(_product_tmp, x, modulus_limbs, y, modulus_limbs, scratch); \
+ mpn_sec_div_r(_product_tmp, modulus_limbs+modulus_limbs, m, modulus_limbs, scratch); \
+ for (size_t i = 0; i<modulus_limbs; i++) { out[i] = _product_tmp[i]; } \
+ } while (0)
+
+ #define fe_sqr(out, x) do { \
+ mpn_sec_sqr(_product_tmp, x, modulus_limbs, scratch); \
+ mpn_sec_div_r(_product_tmp, modulus_limbs+modulus_limbs, m, modulus_limbs, scratch); \
+ for (size_t i = 0; i<modulus_limbs; i++) { out[i] = _product_tmp[i]; } \
+ } while (0)
+
+ #define fe_add(out, x, y) do { \
+ mpn_cnd_sub_n(mpn_add_n(out, x, y, modulus_limbs), out, out, m, modulus_limbs); \
+ } while (0)
+
+ #define fe_sub(out, x, y) do { \
+ mpn_cnd_add_n(mpn_sub_n(out, x, y, modulus_limbs), out, out, m, modulus_limbs); \
+ } while (0)
+
+ #define fe_inv(out, x) do { \
+ for (size_t i = 0; i<modulus_limbs; i++) { _product_tmp[i] = x[i]; } \
+ mp_size_t invertible = mpn_sec_invert(out, _product_tmp, m, modulus_limbs, 2*modulus_limbs*GMP_NUMB_BITS, scratch); \
+ mpn_cnd_sub_n(1-invertible, out, out, out, modulus_limbs); \
+ } while (0)
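+
+  // annotation: everything above is branch-free with respect to secrets. the
+  // mpn_sec_* functions run in time depending only on operand sizes, and
+  // mpn_cnd_add_n/mpn_cnd_sub_n take their condition as a limb argument (here
+  // the carry/borrow returned by the unconditional add/sub). fe_inv maps a
+  // non-invertible input to out - out = 0 via the 1-invertible condition
+  // instead of branching.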
+
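+  // Montgomery ladder state in projective x/z coordinates; the names follow
+  // the fmonty comment below: nq* hold n*Q, nqpq* hold n*Q + Q, and the *2
+  // buffers receive each ladder step's outputs before the pointer swap.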
+ mp_limb_t a[modulus_limbs] = {0}; mp_limb_t *nqpqx = a;
+ mp_limb_t b[modulus_limbs] = {1}; mp_limb_t *nqpqz = b;
+  mp_limb_t c[modulus_limbs] = {1}; mp_limb_t *nqx = c;
+ mp_limb_t d[modulus_limbs] = {0}; mp_limb_t *nqz = d;
+ mp_limb_t e[modulus_limbs] = {0}; mp_limb_t *nqpqx2 = e;
+ mp_limb_t f[modulus_limbs] = {1}; mp_limb_t *nqpqz2 = f;
+ mp_limb_t g[modulus_limbs] = {0}; mp_limb_t *nqx2 = g;
+ mp_limb_t h[modulus_limbs] = {1}; mp_limb_t *nqz2 = h;
+ mp_limb_t *t;
+
+ uint8_t revpoint[modulus_bytes];
+ for (size_t i = 0; i<modulus_bytes; i++) { revpoint[i] = point[modulus_bytes-1-i]; }
+ for (size_t i = 0; i<modulus_limbs; i++) { nqpqx[i] = 0; }
+ assert(mpn_set_str(nqpqx, revpoint, modulus_bytes, 256) <= (mp_size_t)modulus_limbs);
+
+ mp_limb_t qmqp[modulus_limbs];
+ for (size_t i = 0; i<modulus_limbs; i++) { qmqp[i] = nqpqx[i]; }
+
+  for (size_t i = secretbits-1; i < secretbits; --i) { // i counts down to 0; unsigned wraparound terminates the loop
+ mp_limb_t bit = (secret[i/8] >> (i%8))&1;
+ // printf("%01d ", bit);
+ // { mp_limb_t pr[modulus_limbs]; fe_inv(pr, nqz); fe_mul(pr, pr, nqx); fe_print(pr); }
+ // printf(" ");
+ // { mp_limb_t pr[modulus_limbs]; fe_inv(pr, nqpqz); fe_mul(pr, pr, nqpqx); fe_print(pr); }
+ // printf("\n");
+
+ mpn_cnd_swap(bit, nqx, nqpqx, modulus_limbs);
+ mpn_cnd_swap(bit, nqz, nqpqz, modulus_limbs);
+
+ mp_limb_t *x2 = nqx2;
+ mp_limb_t *z2 = nqz2;
+ mp_limb_t *x3 = nqpqx2;
+ mp_limb_t *z3 = nqpqz2;
+ mp_limb_t *x = nqx;
+ mp_limb_t *z = nqz;
+ mp_limb_t *xprime = nqpqx;
+ mp_limb_t *zprime = nqpqz;
+    // fmonty(mp_limb_t *x2, mp_limb_t *z2, /* output 2Q */
+ // mp_limb_t *x3, mp_limb_t *z3, /* output Q + Q' */
+ // mp_limb_t *x, mp_limb_t *z, /* input Q */
+ // mp_limb_t *xprime, mp_limb_t *zprime, /* input Q' */
+ // const mp_limb_t *qmqp /* input Q - Q' */) {
+
+ mp_limb_t origx[modulus_limbs], origxprime[modulus_limbs], zzz[modulus_limbs], xx[modulus_limbs], zz[modulus_limbs], xxprime[modulus_limbs], zzprime[modulus_limbs], zzzprime[modulus_limbs];
+
+ for (size_t i = 0; i<modulus_limbs; i++) { origx[i] = x[i]; }
+ fe_add(x, x, z);
+ fe_sub(z, origx, z);
+
+ for (size_t i = 0; i<modulus_limbs; i++) { origxprime[i] = xprime[i]; }
+ fe_add(xprime, xprime, zprime);
+ fe_sub(zprime, origxprime, zprime);
+ fe_mul(xxprime, xprime, z);
+ fe_mul(zzprime, x, zprime);
+ for (size_t i = 0; i<modulus_limbs; i++) { origxprime[i] = xxprime[i]; }
+ fe_add(xxprime, xxprime, zzprime);
+ fe_sub(zzprime, origxprime, zzprime);
+ fe_sqr(x3, xxprime);
+ fe_sqr(zzzprime, zzprime);
+ fe_mul(z3, zzzprime, qmqp);
+
+ fe_sqr(xx, x);
+ fe_sqr(zz, z);
+ fe_mul(x2, xx, zz);
+ fe_sub(zz, xx, zz);
+ fe_mul(zzz, zz, a24);
+ fe_add(zzz, zzz, xx);
+ fe_mul(z2, zz, zzz);
+
+ // } fmonty
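+    // annotation: the step above computes the standard x-only Montgomery
+    // ladder formulas, with (X,Z) = input Q and (X',Z') = input Q':
+    //   x2 = (X+Z)^2 (X-Z)^2
+    //   z2 = 4XZ ((X+Z)^2 + a24*4XZ)        where 4XZ = (X+Z)^2 - (X-Z)^2
+    //   x3 = ((X+Z)(X'-Z') + (X-Z)(X'+Z'))^2
+    //   z3 = x(Q-Q') ((X+Z)(X'-Z') - (X-Z)(X'+Z'))^2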
+
+ mpn_cnd_swap(bit, nqx2, nqpqx2, modulus_limbs);
+ mpn_cnd_swap(bit, nqz2, nqpqz2, modulus_limbs);
+
+ t = nqx;
+ nqx = nqx2;
+ nqx2 = t;
+ t = nqz;
+ nqz = nqz2;
+ nqz2 = t;
+ t = nqpqx;
+ nqpqx = nqpqx2;
+ nqpqx2 = t;
+ t = nqpqz;
+ nqpqz = nqpqz2;
+ nqpqz2 = t;
+
+ }
+
+ fe_inv(nqz, nqz);
+ fe_mul(nqx, nqx, nqz);
+
+ for (size_t i = 0; i < modulus_bytes; i++) { out[i] = 0; }
+ for (size_t i = 0; i < 8*modulus_bytes; i++) {
+ mp_limb_t bit = (nqx[i/GMP_LIMB_BITS] >> (i%GMP_LIMB_BITS))&1;
+    out[i/8] |= bit<<(i%8);
+ }
+}
+
+
+int main() {
+ // {
+ // uint8_t out[sizeof(modulus)] = {0};
+ // uint8_t point[sizeof(modulus)] = {9};
+ // uint8_t secret[sizeof(modulus)] = {1};
+  //   crypto_scalarmult(out, secret, 256, point);
+ // printf("0x"); for (int i = 31; i>=0; --i) { printf("%02x", out[i]); }; printf("\n");
+ // }
+
+ {
+ const uint8_t expected[32] = {0x89, 0x16, 0x1f, 0xde, 0x88, 0x7b, 0x2b, 0x53, 0xde, 0x54, 0x9a, 0xf4, 0x83, 0x94, 0x01, 0x06, 0xec, 0xc1, 0x14, 0xd6, 0x98, 0x2d, 0xaa, 0x98, 0x25, 0x6d, 0xe2, 0x3b, 0xdf, 0x77, 0x66, 0x1a};
+ const uint8_t basepoint[32] = {9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+
+ uint8_t a[32] = {0}, b[32] = {0};
+ uint8_t* in = a;
+ uint8_t* out = b;
+ a[0] = 1;
+
+ for (int i = 0; i < 200; i++) {
+      in[0] &= 248;  // X25519 clamping: clear the low 3 bits,
+      in[31] &= 127; // clear bit 255,
+      in[31] |= 64;  // and set bit 254.
+
+ crypto_scalarmult(out, in, 256, basepoint);
+ uint8_t* t = out;
+ out = in;
+ in = t;
+ }
+
+ for (int i = 0; i < 32; i++) {
+ if (in[i] != expected[i]) {
+ return (i+1);
+ }
+ }
+ return 0;
+ }
+}
diff --git a/src/Specific/Framework/bench/gmpvar.c b/src/Specific/Framework/bench/gmpvar.c
new file mode 100644
index 000000000..97b10109e
--- /dev/null
+++ b/src/Specific/Framework/bench/gmpvar.c
@@ -0,0 +1,207 @@
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <gmp.h>
+
+// modulus, encoded as big-endian bytes
+static const unsigned char modulus[] = {0x7f,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xed};
+static const unsigned char a_minus_two_over_four[] = {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xdb,0x41};
+#define modulus_bytes (sizeof(modulus))
+#define modulus_limbs ((8*sizeof(modulus) + GMP_LIMB_BITS-1)/GMP_LIMB_BITS)
+
+
+static void fe_print(mp_limb_t* fe) {
+ printf("0x");
+ for (size_t i = modulus_limbs-1; i > 0; --i) { printf("%016lx", fe[i]); }
+ printf("%016lx", fe[0]);
+}
+
+static void crypto_scalarmult(uint8_t *out, const uint8_t *secret, size_t secretbits, const uint8_t *point) {
+ // curve constants
+ mp_limb_t m[modulus_limbs+1];
+ mp_limb_t a24[modulus_limbs+1];
+ assert(mpn_set_str(m, modulus, modulus_bytes, 256) == (mp_size_t)modulus_limbs);
+ assert(mpn_set_str(a24, a_minus_two_over_four, sizeof(a_minus_two_over_four), 256) <= (mp_size_t)modulus_limbs);
+
+  // allocate scratch space for internal use by GMP.
+  // as the GMP *_itch routines are documented as functions, not macros, we use
+  // a variable-length stack allocation; hopefully the compiler will inline the
+  // _itch functions and determine the correct stack frame size statically
+  // through constant propagation.
+ mp_size_t invscratch_sz = mpn_sec_invert_itch(modulus_limbs);
+ mp_size_t scratch_sz = invscratch_sz;
+  scratch_sz = (modulus_limbs+1 > scratch_sz) ? modulus_limbs+1 : scratch_sz; // mpn_tdiv_qr writes an nn-dn+1 = modulus_limbs+1 limb quotient into scratch
+ mp_limb_t scratch[scratch_sz];
+ for (size_t i = 0; i<scratch_sz; ++i) { scratch[i] = 0; }
+
+ // allocate scratch space for use by the field operation macros.
+ mp_limb_t _product_tmp[modulus_limbs+modulus_limbs];
+
+ #define fe_mul(out, x, y) do { \
+ mpn_mul(_product_tmp, x, modulus_limbs, y, modulus_limbs); \
+ mpn_tdiv_qr(scratch, _product_tmp, 0, _product_tmp, modulus_limbs+modulus_limbs, m, modulus_limbs); \
+ for (size_t i = 0; i<modulus_limbs; i++) { out[i] = _product_tmp[i]; } \
+ } while (0)
+
+ #define fe_sqr(out, x) do { \
+ mpn_sqr(_product_tmp, x, modulus_limbs); \
+ mpn_tdiv_qr(scratch, _product_tmp, 0, _product_tmp, modulus_limbs+modulus_limbs, m, modulus_limbs); \
+ for (size_t i = 0; i<modulus_limbs; i++) { out[i] = _product_tmp[i]; } \
+ } while (0)
+
+ #define fe_add(out, x, y) do { \
+ if (mpn_add_n(out, x, y, modulus_limbs)) { \
+ mpn_sub_n(out, out, m, modulus_limbs); \
+ } \
+ } while (0)
+
+ #define fe_sub(out, x, y) do { \
+ if (mpn_sub_n(out, x, y, modulus_limbs)) { \
+ mpn_add_n(out, out, m, modulus_limbs); \
+ } \
+ } while (0)
+
+ #define fe_inv(out, x) do { \
+ for (size_t i = 0; i<modulus_limbs; i++) { _product_tmp[i] = x[i]; } \
+ mp_size_t invertible = mpn_sec_invert(out, _product_tmp, m, modulus_limbs, 2*modulus_limbs*GMP_NUMB_BITS, scratch); \
+ mpn_cnd_sub_n(1-invertible, out, out, out, modulus_limbs); \
+ } while (0)
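+
+  // annotation: unlike gmpsec.c, this variant uses GMP's variable-time
+  // primitives (mpn_mul, mpn_sqr, mpn_tdiv_qr) and branches on the carry in
+  // fe_add/fe_sub, so its timing may depend on secret data; it serves as a
+  // speed baseline rather than a constant-time implementation (only fe_inv
+  // still goes through mpn_sec_invert).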
+
+ mp_limb_t a[modulus_limbs] = {0}; mp_limb_t *nqpqx = a;
+ mp_limb_t b[modulus_limbs] = {1}; mp_limb_t *nqpqz = b;
+  mp_limb_t c[modulus_limbs] = {1}; mp_limb_t *nqx = c;
+ mp_limb_t d[modulus_limbs] = {0}; mp_limb_t *nqz = d;
+ mp_limb_t e[modulus_limbs] = {0}; mp_limb_t *nqpqx2 = e;
+ mp_limb_t f[modulus_limbs] = {1}; mp_limb_t *nqpqz2 = f;
+ mp_limb_t g[modulus_limbs] = {0}; mp_limb_t *nqx2 = g;
+ mp_limb_t h[modulus_limbs] = {1}; mp_limb_t *nqz2 = h;
+ mp_limb_t *t;
+
+ uint8_t revpoint[modulus_bytes];
+ for (size_t i = 0; i<modulus_bytes; i++) { revpoint[i] = point[modulus_bytes-1-i]; }
+ for (size_t i = 0; i<modulus_limbs; i++) { nqpqx[i] = 0; }
+ assert(mpn_set_str(nqpqx, revpoint, modulus_bytes, 256) <= (mp_size_t)modulus_limbs);
+
+ mp_limb_t qmqp[modulus_limbs];
+ for (size_t i = 0; i<modulus_limbs; i++) { qmqp[i] = nqpqx[i]; }
+
+ for (size_t i = secretbits-1; i < secretbits; --i) {
+ mp_limb_t bit = (secret[i/8] >> (i%8))&1;
+ // printf("%01d ", bit);
+ // { mp_limb_t pr[modulus_limbs]; fe_inv(pr, nqz); fe_mul(pr, pr, nqx); fe_print(pr); }
+ // printf(" ");
+ // { mp_limb_t pr[modulus_limbs]; fe_inv(pr, nqpqz); fe_mul(pr, pr, nqpqx); fe_print(pr); }
+ // printf("\n");
+
+ mpn_cnd_swap(bit, nqx, nqpqx, modulus_limbs);
+ mpn_cnd_swap(bit, nqz, nqpqz, modulus_limbs);
+
+ mp_limb_t *x2 = nqx2;
+ mp_limb_t *z2 = nqz2;
+ mp_limb_t *x3 = nqpqx2;
+ mp_limb_t *z3 = nqpqz2;
+ mp_limb_t *x = nqx;
+ mp_limb_t *z = nqz;
+ mp_limb_t *xprime = nqpqx;
+ mp_limb_t *zprime = nqpqz;
+    // fmonty(mp_limb_t *x2, mp_limb_t *z2, /* output 2Q */
+ // mp_limb_t *x3, mp_limb_t *z3, /* output Q + Q' */
+ // mp_limb_t *x, mp_limb_t *z, /* input Q */
+ // mp_limb_t *xprime, mp_limb_t *zprime, /* input Q' */
+ // const mp_limb_t *qmqp /* input Q - Q' */) {
+
+ mp_limb_t origx[modulus_limbs], origxprime[modulus_limbs], zzz[modulus_limbs], xx[modulus_limbs], zz[modulus_limbs], xxprime[modulus_limbs], zzprime[modulus_limbs], zzzprime[modulus_limbs];
+
+ for (size_t i = 0; i<modulus_limbs; i++) { origx[i] = x[i]; }
+ fe_add(x, x, z);
+ fe_sub(z, origx, z);
+
+ for (size_t i = 0; i<modulus_limbs; i++) { origxprime[i] = xprime[i]; }
+ fe_add(xprime, xprime, zprime);
+ fe_sub(zprime, origxprime, zprime);
+ fe_mul(xxprime, xprime, z);
+ fe_mul(zzprime, x, zprime);
+ for (size_t i = 0; i<modulus_limbs; i++) { origxprime[i] = xxprime[i]; }
+ fe_add(xxprime, xxprime, zzprime);
+ fe_sub(zzprime, origxprime, zzprime);
+ fe_sqr(x3, xxprime);
+ fe_sqr(zzzprime, zzprime);
+ fe_mul(z3, zzzprime, qmqp);
+
+ fe_sqr(xx, x);
+ fe_sqr(zz, z);
+ fe_mul(x2, xx, zz);
+ fe_sub(zz, xx, zz);
+ fe_mul(zzz, zz, a24);
+ fe_add(zzz, zzz, xx);
+ fe_mul(z2, zz, zzz);
+
+ // } fmonty
+
+ mpn_cnd_swap(bit, nqx2, nqpqx2, modulus_limbs);
+ mpn_cnd_swap(bit, nqz2, nqpqz2, modulus_limbs);
+
+ t = nqx;
+ nqx = nqx2;
+ nqx2 = t;
+ t = nqz;
+ nqz = nqz2;
+ nqz2 = t;
+ t = nqpqx;
+ nqpqx = nqpqx2;
+ nqpqx2 = t;
+ t = nqpqz;
+ nqpqz = nqpqz2;
+ nqpqz2 = t;
+
+ }
+
+ fe_inv(nqz, nqz);
+ fe_mul(nqx, nqx, nqz);
+
+ for (size_t i = 0; i < modulus_bytes; i++) { out[i] = 0; }
+ for (size_t i = 0; i < 8*modulus_bytes; i++) {
+ mp_limb_t bit = (nqx[i/GMP_LIMB_BITS] >> (i%GMP_LIMB_BITS))&1;
+    out[i/8] |= bit<<(i%8);
+ }
+}
+
+
+int main() {
+ // {
+ // uint8_t out[sizeof(modulus)] = {0};
+ // uint8_t point[sizeof(modulus)] = {9};
+ // uint8_t secret[sizeof(modulus)] = {1};
+  //   crypto_scalarmult(out, secret, 256, point);
+ // printf("0x"); for (int i = 31; i>=0; --i) { printf("%02x", out[i]); }; printf("\n");
+ // }
+
+ {
+ const uint8_t expected[32] = {0x89, 0x16, 0x1f, 0xde, 0x88, 0x7b, 0x2b, 0x53, 0xde, 0x54, 0x9a, 0xf4, 0x83, 0x94, 0x01, 0x06, 0xec, 0xc1, 0x14, 0xd6, 0x98, 0x2d, 0xaa, 0x98, 0x25, 0x6d, 0xe2, 0x3b, 0xdf, 0x77, 0x66, 0x1a};
+ const uint8_t basepoint[32] = {9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+
+ uint8_t a[32] = {0}, b[32] = {0};
+ uint8_t* in = a;
+ uint8_t* out = b;
+ a[0] = 1;
+
+ for (int i = 0; i < 200; i++) {
+ in[0] &= 248;
+ in[31] &= 127;
+ in[31] |= 64;
+
+ crypto_scalarmult(out, in, 256, basepoint);
+ uint8_t* t = out;
+ out = in;
+ in = t;
+ }
+
+ for (int i = 0; i < 32; i++) {
+ if (in[i] != expected[i]) {
+ return (i+1);
+ }
+ }
+ return 0;
+ }
+}
diff --git a/src/Specific/Framework/bench/gmpxx.cpp b/src/Specific/Framework/bench/gmpxx.cpp
new file mode 100644
index 000000000..4710f8ad3
--- /dev/null
+++ b/src/Specific/Framework/bench/gmpxx.cpp
@@ -0,0 +1,129 @@
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <utility> // std::pair, std::make_pair, std::swap
+#include <gmpxx.h>
+
+
+static const mpz_class q = (1_mpz<<255)-19;
+static const size_t modulus_bytes = 32;
+static const unsigned int a24 = 0x01db41;
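+// annotation: q is the Curve25519 prime 2^255 - 19 and a24 = 0x01db41 =
+// 121665 = (486662-2)/4. mpz_class's % is a truncating remainder (sign of the
+// dividend), so intermediates can be negative; the "remainder -> modulo"
+// fixups below add q back before inverting or serializing.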
+
+static void fe_print(const mpz_class &x) {
+ printf("0x"); for (size_t i = modulus_bytes-1; i<modulus_bytes; --i) { printf("%02x", mpz_class(x>>(8*i)).get_ui()&0xff); }
+}
+
+static void fe_print_frac(mpz_class x, mpz_class z) {
+ // remainder -> modulo
+ if (z < 0) { z += q; }
+ if (mpz_invert(z.get_mpz_t(), z.get_mpz_t(), q.get_mpz_t())) {
+ // remainder -> modulo
+ if (x < 0) { x += q; }
+ x = x*z % q;
+ fe_print(x);
+ } else {
+ printf("inf ");
+ }
+}
+
+using std::pair;
+using std::make_pair;
+static const pair<pair<mpz_class,mpz_class>, pair<mpz_class,mpz_class>>
+ladderstep(const mpz_class &x1, const mpz_class &x, const mpz_class &z, const mpz_class &x_p, const mpz_class &z_p) {
+ mpz_class t;
+ { t = x; mpz_class origx = t;
+ { t = (x + z)%q; mpz_class x = t;
+ { t = (origx - z)%q; mpz_class z = t;
+ { t = x_p; mpz_class origx_p = t;
+ { t = (x_p + z_p)%q; mpz_class x_p = t;
+ { t = (origx_p - z_p)%q; mpz_class z_p = t;
+ { t = (x_p * z)%q; mpz_class xx_p = t;
+ { t = (x * z_p)%q; mpz_class zz_p = t;
+ { t = xx_p; mpz_class origx_p = t;
+ { t = (xx_p + zz_p)%q; mpz_class xx_p = t;
+ { t = (origx_p - zz_p)%q; mpz_class zz_p = t;
+ { t = (xx_p*xx_p)%q; mpz_class x3 = t;
+ { t = (zz_p*zz_p)%q; mpz_class zzz_p = t;
+ { t = (zzz_p * x1)%q; mpz_class z3 = t;
+ { t = (x*x)%q; mpz_class xx = t;
+ { t = (z*z)%q; mpz_class zz = t;
+ { t = (xx * zz)%q; mpz_class x2 = t;
+ { t = (xx - zz)%q; mpz_class zz = t;
+ { t = (zz * a24)%q; mpz_class zzz = t;
+ { t = (zzz + xx)%q; mpz_class zzz = t;
+ { t = (zz * zzz)%q; mpz_class z2 = t;
+
+ return make_pair(make_pair(x2, z2), make_pair(x3, z3));
+ }}}}}}}}}}}}}}}}}}}}}
+}
+
+static void crypto_scalarmult(uint8_t *out, const uint8_t *secret, size_t secretbits, const uint8_t *point) {
+ mpz_class x1; for (size_t i = 0; i<modulus_bytes; i++) { x1 |= mpz_class(point[i]) << (8*i); }
+ mpz_class x = 1, z = 0, x_p = x1, z_p = 1;
+
+ bool swap = false;
+ for (size_t i = secretbits-1; i < secretbits; --i) {
+ bool bit = (secret[i/8] >> (i%8))&1;
+ // printf("%d ", bit); fe_print_frac(x, z); printf(" "); fe_print_frac(x_p, z_p); printf("\n");
+ if (swap ^ bit) { std::swap(x, x_p); std::swap(z, z_p); }
+ swap = bit;
+
+ auto pp = ladderstep(x1, x, z, x_p, z_p);
+ x = pp.first.first;
+ z = pp.first.second;
+ x_p = pp.second.first;
+ z_p = pp.second.second;
+ }
+ if (swap) { std::swap(x, x_p); std::swap(z, z_p); }
+
+ // remainder -> modulo
+ if (z < 0) { z += q; }
+
+ if (mpz_invert(z.get_mpz_t(), z.get_mpz_t(), q.get_mpz_t())) {
+ x = x*z % q;
+ } else {
+ x = 0;
+ }
+
+ // remainder -> modulo
+ if (x < 0) { x += q; }
+
+ for (size_t i = 0; i<modulus_bytes; i++) { out[i] = mpz_class(x>>(8*i)).get_ui()&0xff; }
+}
+
+int main() {
+ {
+ uint8_t out[modulus_bytes] = {0};
+ uint8_t point[modulus_bytes] = {9};
+ uint8_t secret[modulus_bytes] = {1};
+ crypto_scalarmult(out, secret, 256, point);
+ // printf("0x"); for (int i = 31; i>=0; --i) { printf("%02x", out[i]); }; printf("\n");
+ }
+ {
+ const uint8_t expected[32] = {0x89, 0x16, 0x1f, 0xde, 0x88, 0x7b, 0x2b, 0x53, 0xde, 0x54, 0x9a, 0xf4, 0x83, 0x94, 0x01, 0x06, 0xec, 0xc1, 0x14, 0xd6, 0x98, 0x2d, 0xaa, 0x98, 0x25, 0x6d, 0xe2, 0x3b, 0xdf, 0x77, 0x66, 0x1a};
+ const uint8_t basepoint[32] = {9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+
+ uint8_t a[32] = {0}, b[32] = {0};
+ uint8_t* in = a;
+ uint8_t* out = b;
+ a[0] = 1;
+
+ for (int i = 0; i < 200; i++) {
+ in[0] &= 248;
+ in[31] &= 127;
+ in[31] |= 64;
+
+ crypto_scalarmult(out, in, 256, basepoint);
+ uint8_t* t = out;
+ out = in;
+ in = t;
+ }
+
+ for (int i = 0; i < 32; i++) {
+ if (in[i] != expected[i]) {
+ return (i+1);
+ }
+ }
+ return 0;
+ }
+}
diff --git a/src/Specific/Framework/bench/montladder.py b/src/Specific/Framework/bench/montladder.py
new file mode 100644
index 000000000..da32c73d2
--- /dev/null
+++ b/src/Specific/Framework/bench/montladder.py
@@ -0,0 +1,57 @@
+q = 2**448 - 2**224 - 1
+modulus_bytes = 56
+a24 = 39081
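+# annotation: q is the Curve448 ("Goldilocks") prime 2^448 - 2^224 - 1 and
+# a24 = 39081 = (156326-2)/4, i.e. (A-2)/4 for Curve448's A = 156326.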
+
+def ladderstep(x1, x, z, x_p, z_p):
+ origx = x
+ x = (x + z)%q
+ z = (origx - z)%q
+ origx_p = x_p
+ x_p = (x_p + z_p)%q
+ z_p = (origx_p - z_p)%q
+ xx_p = (x_p * z)%q
+ zz_p = (x * z_p)%q
+ origx_p = xx_p
+ xx_p = (xx_p + zz_p)%q
+ zz_p = (origx_p - zz_p)%q
+ x3 = (xx_p*xx_p)%q
+ zzz_p = (zz_p*zz_p)%q
+ z3 = (zzz_p * x1)%q
+ xx = (x*x)%q
+ zz = (z*z)%q
+ x2 = (xx * zz)%q
+ zz = (xx - zz)%q
+ zzz = (zz * a24)%q
+ zzz = (zzz + xx)%q
+ z2 = (zz * zzz)%q
+ return ((x2, z2), (x3, z3))
+
+def crypto_scalarmult(secret, secretbits, point):
+ x1 = sum(point[i] << (8*i) for i in range(modulus_bytes))
+ x = 1; z = 0; x_p = x1; z_p = 1;
+ swap = 0
+
+ i = secretbits - 1
+ while i >= 0:
+        bit = (secret[i//8] >> (i%8)) & 1
+ # print(bit, x*pow(z,q-2,q)%q, x_p*pow(z_p,q-2,q)%q)
+ if swap ^ bit: ((x, z), (x_p, z_p)) = ((x_p, z_p), (x, z))
+ swap = bit
+ (x, z), (x_p, z_p) = ladderstep(x1, x, z, x_p, z_p)
+ i -= 1
+ if swap: ((x, z), (x_p, z_p)) = ((x_p, z_p), (x, z))
+ x = (x*pow(z,q-2,q))%q
+ return [((x >> (8*i)) & 0xff) for i in range(modulus_bytes)]
+
+if __name__ == '__main__':
+ BASE = [5]+(modulus_bytes-1)*[0]
+ print (crypto_scalarmult([1]+(modulus_bytes-1)*[0], 8*modulus_bytes, BASE))
+
+ s = [61, 38, 47, 221, 249, 236, 142, 136, 73, 82, 102, 254, 161, 154, 52, 210, 136, 130, 172, 239, 4, 81, 4, 208, 209, 170, 225, 33, 112, 10, 119, 156, 152, 76, 36, 248, 205, 215, 143, 191, 244, 73, 67, 235, 163, 104, 245, 75, 41, 37, 154, 79, 28, 96, 10, 211]
+ s[0] &= 252
+ s[55] |= 128
+
+ P = [6, 252, 230, 64, 250, 52, 135, 191, 218, 95, 108, 242, 213, 38, 63, 138, 173, 136, 51, 76, 189, 7, 67, 127, 2, 15, 8, 249, 129, 77, 192, 49, 221, 189, 195, 140, 25, 198, 218, 37, 131, 250, 84, 41, 219, 148, 173, 161, 138, 167, 167, 251, 78, 248, 160, 134]
+ Q = crypto_scalarmult(s, 8*modulus_bytes, P)
+    print(''.join('%02x' % i for i in Q))
+ print(Q==[206, 62, 79, 249, 90, 96, 220, 102, 151, 218, 29, 177, 216, 94, 106, 251, 223, 121, 181, 10, 36, 18, 215, 84, 109, 95, 35, 159, 225, 79, 186, 173, 235, 68, 95, 198, 106, 1, 176, 119, 157, 152, 34, 57, 97, 17, 30, 33, 118, 98, 130, 247, 61, 217, 107, 111])
diff --git a/src/Specific/Framework/bench/prettyprint.py b/src/Specific/Framework/bench/prettyprint.py
new file mode 100755
index 000000000..f349cee5c
--- /dev/null
+++ b/src/Specific/Framework/bench/prettyprint.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+import re, sys
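+
+# annotation (inferred from the parsing below): this script reads one
+# pretty-printed synthesized operation on stdin -- a lambda line of binders,
+# let-binding body lines, a Return line, and a final arrow-separated type
+# line -- and prints it as a static C function named by sys.argv[1], with a
+# flat output-array parameter followed by one const input array per argument.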
+
+def translate_typename(t):
+ return {
+ 'word8':'uint8_t',
+ 'word16':'uint16_t',
+ 'word32':'uint32_t',
+ 'word64':'uint64_t',
+ 'word128':'uint128_t',
+ 'word 8':'uint8_t',
+ 'word 16':'uint16_t',
+ 'word 32':'uint32_t',
+ 'word 64':'uint64_t',
+ 'word 128':'uint128_t'
+ }.get(t,t)
+
+def cleanup_type(t):
+ t = t.strip()
+ if t.startswith('ReturnType (') and t.endswith(')'):
+ t = t[len('ReturnType ('):-len(')')]
+ return t
+
+def translate_type(t):
+ t = cleanup_type(t)
+ fieldtypes = [s.strip() for s in t.split('*')]
+ assert len(set(fieldtypes)) == 1
+ return translate_typename(fieldtypes[0]), len(fieldtypes)
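+
+# for illustration (hypothetical input, not from the original sources):
+#   translate_type('ReturnType (word64 * word64 * word64 * word64)')
+#   == ('uint64_t', 4)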
+
+
+funcname = sys.argv[1]
+_, _, _, binderline, *bodylines, returnline, _, typeline = sys.stdin.read().strip('\n').split('\n')
+
+*arguments, (returntype, returnlength) = [translate_type(s) for s in typeline.lstrip(": ").split('→')]
+
+binders = binderline.replace('λ','').replace('\'', '').replace('(','').replace(')','').replace('%core','').split(',')
+binders = [b.strip() for b in binders if b.strip()]
+
+returnline = returnline.strip()
+assert returnline.endswith(')') # I don't know why there is an extra paren on that line
+returnline = returnline[:-len(')')].strip()
+if returnline.startswith('return'):
+ returnline = returnline[len('return'):].strip()
+if returnline.startswith('(') and returnline.endswith(')'): # but only once... idk why
+ returnline = returnline[1:-1].strip()
+returneds = returnline.replace('Return','').split(',')
+returneds = [r.strip() for r in returneds if r.strip()]
+
+assert (len(binders) == sum(l for (_, l) in arguments))
+
+indent = ' '
+
+outparam = [(returntype, "out", returnlength)]
+inparams = [("const "+t, "in%d"%(i+1), l) for (i,(t,l)) in enumerate(arguments)]
+print ("static void "+funcname+"(" + ", ".join("%s %s[%d]"%x for x in (outparam+inparams)) + ") {")
+
+braces = 0
+# pair each lambda binder with an input array cell; within each argument the
+# cells are taken from the highest index down
+for (b, (i, (t, a))) in (list(zip(binders,sum((list(reversed(list(enumerate(l*[(t,n)])))) for (t, n, l) in inparams), [])))):
+ print (indent+ "{ %s %s = %s[%d];"%(t, b, a, i))
+ braces += 1
+
+mulx_re = re.compile(r'^([^,]*),(\s*)([^ ]*) ([^ ]*)(.*)(mulx.*)\)([; ]*)$'); mulx_sub = r' \3 \4;\2\1\5_\6, &\4)\7'
+adc_re = re.compile(r'^([^,]*) ([^, ]*)(\s*),(.*)(addcarryx.*)\)([; ]*)$'); adc_sub = r'\1 \2\3;\4_\5, &\2)\6'
+sbb_re = re.compile(r'^([^,]*) ([^, ]*)(\s*),(.*)(subborrow.*)\)([; ]*)$'); sbb_sub = r'\1 \2\3;\4_\5, &\2)\6'
+for s in bodylines:
+ s = re.sub(mulx_re, mulx_sub, s)
+ s = re.sub(adc_re, adc_sub, s)
+ s = re.sub(sbb_re, sbb_sub, s)
+ print (indent+ "{"+s)
+ braces += 1
+
+for (i,r) in enumerate(reversed(returneds)):
+ print (indent+ "out[%d] = %s;"%(i, r))
+
+print (indent+'}'*braces)
+
+print ('}')