author    Andres Erbsen <andreser@mit.edu>    2017-06-18 15:46:40 -0400
committer Andres Erbsen <andreser@mit.edu>    2017-06-18 15:47:33 -0400
commit    87bf48fccf89460b8264bb5cedf6b0e966dde563 (patch)
tree      4222fdb4f6e3b496623b28a26e994f07154e1a47
parent    989f6a49cb390b6279ee8ce0ed70eb993ff1b809 (diff)
compile X25519 C code from Makefile
-rw-r--r--  Makefile                                           |  17
-rw-r--r--  _CoqProject                                        |  12
-rwxr-xr-x  extract-function-header.sh                         |  37
-rwxr-xr-x  extract-function.sh                                |  71
-rw-r--r--  measure.c                                          | 108
-rw-r--r--  src/Specific/IntegrationTestLadderstepDisplay.log  | 370
-rw-r--r--  src/Specific/IntegrationTestMulDisplay.log         |  40
-rw-r--r--  src/Specific/IntegrationTestSquareDisplay.log      |  37
-rwxr-xr-x  src/Specific/X25519/C64/compiler.sh                |   4
-rw-r--r--  src/Specific/X25519/C64/femul.v (renamed from src/Specific/IntegrationTestMul.v)                       |   0
-rw-r--r--  src/Specific/X25519/C64/femulDisplay.v (renamed from src/Specific/IntegrationTestMulDisplay.v)         |   2
-rw-r--r--  src/Specific/X25519/C64/fesquare.v (renamed from src/Specific/IntegrationTestSquare.v)                 |   0
-rw-r--r--  src/Specific/X25519/C64/fesquareDisplay.v (renamed from src/Specific/IntegrationTestSquareDisplay.v)   |   2
-rw-r--r--  src/Specific/X25519/C64/ladderstep.v (renamed from src/Specific/IntegrationTestLadderstep.v)           |   0
-rw-r--r--  src/Specific/X25519/C64/ladderstepDisplay.v (renamed from src/Specific/IntegrationTestLadderstepDisplay.v) |   2
-rw-r--r--  src/Specific/X25519/C64/scalarmult.c               | 309
16 files changed, 552 insertions(+), 459 deletions(-)
diff --git a/Makefile b/Makefile
index 8dabc27e9..9d85766ba 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,5 @@
+.SUFFIXES:
+
MOD_NAME := Crypto
SRC_DIR := src
TIMED?=
@@ -13,7 +15,7 @@ INSTALLDEFAULTROOT := Crypto
.PHONY: coq clean update-_CoqProject cleanall install \
install-coqprime clean-coqprime coqprime \
- specific-display display \
+ specific-c specific-display display \
specific non-specific lite only-heavy printlite \
curves-proofs no-curves-proofs
@@ -92,6 +94,7 @@ only-heavy: $(HEAVY_VOFILES) coqprime
curves-proofs: $(CURVES_PROOFS_VOFILES) coqprime
no-curves-proofs: $(NO_CURVES_PROOFS_VOFILES) coqprime
specific-display: $(SPECIFIC_DISPLAY_VO:.vo=.log) coqprime
+specific-c: $(SPECIFIC_DISPLAY_VO:Display.vo=.c) coqprime
display: $(DISPLAY_VO:.vo=.log) coqprime
printlite::
@@ -132,12 +135,20 @@ $(DISPLAY_NON_JAVA_VO:.vo=.log) : %Display.log : %.vo %Display.v src/Compilers/Z
$(SHOW)"COQC $*Display > $@"
$(HIDE)$(COQC) $(COQDEBUG) $(COQFLAGS) $*Display.v > $@.tmp && mv -f $@.tmp $@
+$(DISPLAY_NON_JAVA_VO:Display.vo=.c) : %.c : %Display.log extract-function.sh
+ ./extract-function.sh $(patsubst %Display.log,%,$(notdir $<)) < $< > $@
+
+$(DISPLAY_NON_JAVA_VO:Display.vo=.h) : %.h : %Display.log extract-function-header.sh
+ ./extract-function-header.sh $(patsubst %Display.log,%,$(notdir $<)) < $< > $@
+
$(DISPLAY_JAVA_VO:.vo=.log) : %JavaDisplay.log : %.vo %JavaDisplay.v src/Compilers/Z/JavaNotations.vo src/Specific/IntegrationTestDisplayCommon.vo
$(SHOW)"COQC $*JavaDisplay > $@"
$(HIDE)$(COQC) $(COQDEBUG) $(COQFLAGS) $*JavaDisplay.v > $@.tmp && mv -f $@.tmp $@
-src/Specific/x25519_c64.c: src/Specific/x25519_c64.c.sh src/Specific/IntegrationTestLadderstepDisplay.log src/Specific/IntegrationTestMulDisplay.log src/Specific/IntegrationTestSquareDisplay.log
- bash src/Specific/x25519_c64.c.sh > src/Specific/x25519_c64.c
+DISPLAY_X25519_C64_VO := $(filter src/Specific/X25519/C64/%,$(DISPLAY_NON_JAVA_VO))
+
+src/Specific/X25519/C64/measure: src/Specific/X25519/C64/compiler.sh measure.c $(DISPLAY_X25519_C64_VO:Display.vo=.c) $(DISPLAY_X25519_C64_VO:Display.vo=.h) src/Specific/X25519/C64/scalarmult.c
+ src/Specific/X25519/C64/compiler.sh -o src/Specific/X25519/C64/measure -I src/Specific/X25519/C64/ measure.c $(DISPLAY_X25519_C64_VO:Display.vo=.c) src/Specific/X25519/C64/scalarmult.c -D TIMINGS=2047 -D UUT=crypto_scalarmult_bench
clean::
rm -f Makefile.coq
diff --git a/_CoqProject b/_CoqProject
index 54d4958b2..f82134709 100644
--- a/_CoqProject
+++ b/_CoqProject
@@ -221,18 +221,12 @@ src/Specific/IntegrationTestFreeze.v
src/Specific/IntegrationTestFreezeDisplay.v
src/Specific/IntegrationTestKaratsubaMul.v
src/Specific/IntegrationTestKaratsubaMulDisplay.v
-src/Specific/IntegrationTestLadderstep.v
src/Specific/IntegrationTestLadderstep130.v
src/Specific/IntegrationTestLadderstep130Display.v
-src/Specific/IntegrationTestLadderstepDisplay.v
src/Specific/IntegrationTestMontgomeryP256.v
src/Specific/IntegrationTestMontgomeryP256Display.v
src/Specific/IntegrationTestMontgomeryP256_128.v
src/Specific/IntegrationTestMontgomeryP256_128Display.v
-src/Specific/IntegrationTestMul.v
-src/Specific/IntegrationTestMulDisplay.v
-src/Specific/IntegrationTestSquare.v
-src/Specific/IntegrationTestSquareDisplay.v
src/Specific/IntegrationTestSub.v
src/Specific/IntegrationTestSubDisplay.v
src/Specific/IntegrationTestTemporaryMiscCommon.v
@@ -242,6 +236,12 @@ src/Specific/MontgomeryP256_128.v
src/Specific/FancyMachine256/Barrett.v
src/Specific/FancyMachine256/Core.v
src/Specific/FancyMachine256/Montgomery.v
+src/Specific/X25519/C64/femul.v
+src/Specific/X25519/C64/femulDisplay.v
+src/Specific/X25519/C64/fesquare.v
+src/Specific/X25519/C64/fesquareDisplay.v
+src/Specific/X25519/C64/ladderstep.v
+src/Specific/X25519/C64/ladderstepDisplay.v
src/Util/AdditionChainExponentiation.v
src/Util/AutoRewrite.v
src/Util/Bool.v
diff --git a/extract-function-header.sh b/extract-function-header.sh
new file mode 100755
index 000000000..e0db91ac9
--- /dev/null
+++ b/extract-function-header.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+case "$#" in
+ 0)
+ funcname=f
+ ;;
+ 1)
+ funcname="$1"
+ ;;
+ *)
+ exit 111
+ ;;
+esac
+
+cat <<"EOF"
+#include <stdint.h>
+
+#undef force_inline
+#define force_inline __attribute__((always_inline))
+
+EOF
+
+while IFS= read -r line; do
+ case "$line" in
+ *"λ '"*)
+ echo -n "void force_inline $funcname("
+ echo -n "uint64_t* out"
+ echo "$line" | grep -owP -- '\w+\d+' | \
+ while IFS= read -r arg; do
+ echo -n ", uint64_t $arg"
+ done
+ echo ');'
+ break
+ ;;
+ esac
+done
+
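For illustration, the Makefile invokes this script as, e.g., ./extract-function-header.sh femul < src/Specific/X25519/C64/femulDisplay.log > src/Specific/X25519/C64/femul.h. Given a λ-line with the argument tuple shown in the mul display log deleted later in this diff (x10, x11, x9, x7, x5, x18, x19, x17, x15, x13), the output after the fixed preamble would be roughly the following prototype (a sketch; the script emits it as a single line):

    void force_inline femul(uint64_t* out,
                            uint64_t x10, uint64_t x11, uint64_t x9, uint64_t x7, uint64_t x5,
                            uint64_t x18, uint64_t x19, uint64_t x17, uint64_t x15, uint64_t x13);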
diff --git a/extract-function.sh b/extract-function.sh
new file mode 100755
index 000000000..4056f242c
--- /dev/null
+++ b/extract-function.sh
@@ -0,0 +1,71 @@
+#!/bin/sh
+
+case "$#" in
+ 0)
+ funcname=f
+ ;;
+ 1)
+ funcname="$1"
+ ;;
+ *)
+ exit 111
+ ;;
+esac
+
+cat <<EOF
+#include <stdint.h>
+#include <stdbool.h>
+#include <x86intrin.h>
+
+#include "$funcname.h"
+
+typedef unsigned int uint128_t __attribute__((mode(TI)));
+
+#undef force_inline
+#define force_inline __attribute__((always_inline))
+
+EOF
+
+lines=0
+show=false
+while IFS= read -r line; do
+ case "$line" in
+ *"λ '"*)
+ echo -n "void force_inline $funcname("
+ echo -n "uint64_t* out"
+ echo "$line" | grep -owP -- '\w+\d+' | \
+ while IFS= read -r arg; do
+ echo -n ", uint64_t $arg"
+ done
+ echo ')'
+ show=true
+ ;;
+ *"Return "*|*"return "*)
+ i=0
+ echo "$line" | \
+ sed 's:return::g' | sed 's:Return::g' | tr -d '(' | tr -d ')' | tr , '\n' | sed 's/^\s\+//g' | \
+ ( while IFS= read -r ret; do
+ echo "out[$i] = $ret;"
+ ((i++))
+ done;
+ seq 2 "$lines" | while IFS= read -r _; do
+ echo -n "}"
+ done
+ echo "}"
+ echo "// caller: uint64_t out[$i];" )
+ show=false
+ break
+ ;;
+ *)
+ case "$show" in
+ true)
+ ((lines++))
+ echo "{ $line" | \
+ sed s':^\([^,]*\) \([^, ]*\)\(\s*\),\(.*\)\(addcarryx.*\))\([; ]*\)$:\1 \2\3;\4_\5, \&\2)\6:' | \
+ sed s':^\([^,]*\) \([^, ]*\)\(\s*\),\(.*\)\(subborrow.*\))\([; ]*\)$:\1 \2\3;\4_\5, \&\2)\6:'
+ ;;
+ esac
+ ;;
+ esac
+done
+
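This script wraps the straight-line body of a display log into a C function: the λ-line becomes the signature (as in extract-function-header.sh above), each assignment line is passed through while opening a block, and the final Return tuple becomes stores through the out pointer. For the square display log deleted below, whose last line is return (Return x32, Return x29, x37 + x26, Return x38, Return x35), the generated tail would look roughly like:

    out[0] = x32;
    out[1] = x29;
    out[2] = x37 + x26;
    out[3] = x38;
    out[4] = x35;
    /* ...matching closing braces, one per preceding body line... */
    // caller: uint64_t out[5];

The two sed rules appear to rewrite two-result assignments involving addcarryx/subborrow into calls that return the second result through a pointer to the declared variable; the X25519/C64 logs in this diff contain no such operations.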
diff --git a/measure.c b/measure.c
new file mode 100644
index 000000000..91a4aa82a
--- /dev/null
+++ b/measure.c
@@ -0,0 +1,108 @@
+/*
+ * Benchmarking utilities extracted from SUPERCOP by Andres Erbsen
+ * based on measure-anything.c version 20120328 and measure.c
+ * by D. J. Bernstein
+ * Public domain.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+
+// cpucycles_amd64cpuinfo
+long long cpucycles(void)
+{
+ unsigned long long result;
+ asm volatile(".byte 15;.byte 49;shlq $32,%%rdx;orq %%rdx,%%rax"
+ : "=a" (result) :: "%rdx");
+ return result;
+}
+
+// SUPERCOP randombytes
+static uint32_t seed[32] = { 3,1,4,1,5,9,2,6,5,3,5,8,9,7,9,3,2,3,8,4,6,2,6,4,3,3,8,3,2,7,9,5 } ;
+static uint32_t in[12];
+static uint32_t out[8];
+static int outleft = 0;
+static void surf(void)
+{
+ #define ROTATE(x,b) (((x) << (b)) | ((x) >> (32 - (b))))
+ #define MUSH(i,b) x = t[i] += (((x ^ seed[i]) + sum) ^ ROTATE(x,b));
+ uint32_t t[12]; uint32_t x; uint32_t sum = 0;
+ int r; int i; int loop;
+
+ for (i = 0;i < 12;++i) t[i] = in[i] ^ seed[12 + i];
+ for (i = 0;i < 8;++i) out[i] = seed[24 + i];
+ x = t[11];
+ for (loop = 0;loop < 2;++loop) {
+ for (r = 0;r < 16;++r) {
+ sum += 0x9e3779b9;
+ MUSH(0,5) MUSH(1,7) MUSH(2,9) MUSH(3,13)
+ MUSH(4,5) MUSH(5,7) MUSH(6,9) MUSH(7,13)
+ MUSH(8,5) MUSH(9,7) MUSH(10,9) MUSH(11,13)
+ }
+ for (i = 0;i < 8;++i) out[i] ^= t[i + 4];
+ }
+ #undef ROTATE
+ #undef MUSH
+}
+void randombytes(unsigned char *x,unsigned long long xlen)
+{
+ while (xlen > 0) {
+ if (!outleft) {
+ if (!++in[0]) if (!++in[1]) if (!++in[2]) ++in[3];
+ surf();
+ outleft = 8;
+ }
+ *x = out[--outleft];
+ ++x;
+ --xlen;
+ }
+}
+
+// SUPERCOP limits
+void limits()
+{
+#ifdef RLIM_INFINITY
+ struct rlimit r;
+ r.rlim_cur = 0;
+ r.rlim_max = 0;
+#ifdef RLIMIT_NOFILE
+ setrlimit(RLIMIT_NOFILE,&r);
+#endif
+#ifdef RLIMIT_NPROC
+ setrlimit(RLIMIT_NPROC,&r);
+#endif
+#ifdef RLIMIT_CORE
+ setrlimit(RLIMIT_CORE,&r);
+#endif
+#endif
+}
+
+void UUT(unsigned char*);
+
+void measure(void)
+{
+ unsigned char *buf = aligned_alloc(64, 1024);
+ static long long cycles[TIMINGS + 1];
+
+ for (int i = 0;i <= TIMINGS;++i) {
+ cycles[i] = cpucycles();
+ UUT(buf);
+ }
+ for (int i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i];
+ for (int i = 0;i < TIMINGS;++i) printf("%lld\n", cycles[i]);
+
+ __asm__ __volatile__("" :: "m" (buf)); // do not optimize buf away
+}
+
+int main()
+{
+ limits();
+ measure();
+ return 0;
+}
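TIMINGS and UUT are left to be defined on the compiler command line; with the Makefile's -D TIMINGS=2047 -D UUT=crypto_scalarmult_bench, the core of measure() preprocesses to roughly this sketch:

    static long long cycles[2047 + 1];
    for (int i = 0; i <= 2047; ++i) {
      cycles[i] = cpucycles();             /* RDTSC-based timestamp */
      crypto_scalarmult_bench(buf);        /* unit under test */
    }
    /* adjacent differences are per-call cycle counts, printed one per line */

so the program prints 2047 cycle counts, one per consecutive call of the benchmarked function.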
diff --git a/src/Specific/IntegrationTestLadderstepDisplay.log b/src/Specific/IntegrationTestLadderstepDisplay.log
deleted file mode 100644
index 73c47fa40..000000000
--- a/src/Specific/IntegrationTestLadderstepDisplay.log
+++ /dev/null
@@ -1,370 +0,0 @@
-λ x x0 x1 x2 x3 : word64 * word64 * word64 * word64 * word64,
-let (a, b) := Interp-η
-(λ var : Syntax.base_type → Type,
- λ '(x15, x16, x14, x12, x10, (x25, x26, x24, x22, x20, (x33, x34, x32, x30, x28)), (x43, x44, x42, x40, x38, (x51, x52, x50, x48, x46)))%core,
- uint64_t x53 = x25 + x33;
- uint64_t x54 = x26 + x34;
- uint64_t x55 = x24 + x32;
- uint64_t x56 = x22 + x30;
- uint64_t x57 = x20 + x28;
- uint64_t x58 = 0xffffffffffffe + x25 - x33;
- uint64_t x59 = 0xffffffffffffe + x26 - x34;
- uint64_t x60 = 0xffffffffffffe + x24 - x32;
- uint64_t x61 = 0xffffffffffffe + x22 - x30;
- uint64_t x62 = 0xfffffffffffda + x20 - x28;
- uint64_t x63 = x43 + x51;
- uint64_t x64 = x44 + x52;
- uint64_t x65 = x42 + x50;
- uint64_t x66 = x40 + x48;
- uint64_t x67 = x38 + x46;
- uint64_t x68 = 0xffffffffffffe + x43 - x51;
- uint64_t x69 = 0xffffffffffffe + x44 - x52;
- uint64_t x70 = 0xffffffffffffe + x42 - x50;
- uint64_t x71 = 0xffffffffffffe + x40 - x48;
- uint64_t x72 = 0xfffffffffffda + x38 - x46;
- uint128_t x73 = (uint128_t) x67 * x62;
- uint128_t x74 = (uint128_t) x67 * x61 + (uint128_t) x66 * x62;
- uint128_t x75 = (uint128_t) x67 * x60 + (uint128_t) x65 * x62 + (uint128_t) x66 * x61;
- uint128_t x76 = (uint128_t) x67 * x59 + (uint128_t) x64 * x62 + (uint128_t) x66 * x60 + (uint128_t) x65 * x61;
- uint128_t x77 = (uint128_t) x67 * x58 + (uint128_t) x63 * x62 + (uint128_t) x64 * x61 + (uint128_t) x66 * x59 + (uint128_t) x65 * x60;
- uint64_t x78 = x63 * 0x13;
- uint64_t x79 = x66 * 0x13;
- uint64_t x80 = x65 * 0x13;
- uint64_t x81 = x64 * 0x13;
- uint128_t x82 = x73 + (uint128_t) x78 * x61 + (uint128_t) x79 * x58 + (uint128_t) x80 * x59 + (uint128_t) x81 * x60;
- uint128_t x83 = x74 + (uint128_t) x78 * x60 + (uint128_t) x80 * x58 + (uint128_t) x81 * x59;
- uint128_t x84 = x75 + (uint128_t) x78 * x59 + (uint128_t) x81 * x58;
- uint128_t x85 = x76 + (uint128_t) x78 * x58;
- uint64_t x86 = (uint64_t) (x82 >> 0x33);
- uint64_t x87 = (uint64_t) x82 & 0x7ffffffffffff;
- uint128_t x88 = x86 + x83;
- uint64_t x89 = (uint64_t) (x88 >> 0x33);
- uint64_t x90 = (uint64_t) x88 & 0x7ffffffffffff;
- uint128_t x91 = x89 + x84;
- uint64_t x92 = (uint64_t) (x91 >> 0x33);
- uint64_t x93 = (uint64_t) x91 & 0x7ffffffffffff;
- uint128_t x94 = x92 + x85;
- uint64_t x95 = (uint64_t) (x94 >> 0x33);
- uint64_t x96 = (uint64_t) x94 & 0x7ffffffffffff;
- uint128_t x97 = x95 + x77;
- uint64_t x98 = (uint64_t) (x97 >> 0x33);
- uint64_t x99 = (uint64_t) x97 & 0x7ffffffffffff;
- uint64_t x100 = x87 + 0x13 * x98;
- uint64_t x101 = x100 >> 0x33;
- uint64_t x102 = x100 & 0x7ffffffffffff;
- uint64_t x103 = x101 + x90;
- uint64_t x104 = x103 >> 0x33;
- uint64_t x105 = x103 & 0x7ffffffffffff;
- uint64_t x106 = x104 + x93;
- uint128_t x107 = (uint128_t) x57 * x72;
- uint128_t x108 = (uint128_t) x57 * x71 + (uint128_t) x56 * x72;
- uint128_t x109 = (uint128_t) x57 * x70 + (uint128_t) x55 * x72 + (uint128_t) x56 * x71;
- uint128_t x110 = (uint128_t) x57 * x69 + (uint128_t) x54 * x72 + (uint128_t) x56 * x70 + (uint128_t) x55 * x71;
- uint128_t x111 = (uint128_t) x57 * x68 + (uint128_t) x53 * x72 + (uint128_t) x54 * x71 + (uint128_t) x56 * x69 + (uint128_t) x55 * x70;
- uint64_t x112 = x53 * 0x13;
- uint64_t x113 = x56 * 0x13;
- uint64_t x114 = x55 * 0x13;
- uint64_t x115 = x54 * 0x13;
- uint128_t x116 = x107 + (uint128_t) x112 * x71 + (uint128_t) x113 * x68 + (uint128_t) x114 * x69 + (uint128_t) x115 * x70;
- uint128_t x117 = x108 + (uint128_t) x112 * x70 + (uint128_t) x114 * x68 + (uint128_t) x115 * x69;
- uint128_t x118 = x109 + (uint128_t) x112 * x69 + (uint128_t) x115 * x68;
- uint128_t x119 = x110 + (uint128_t) x112 * x68;
- uint64_t x120 = (uint64_t) (x116 >> 0x33);
- uint64_t x121 = (uint64_t) x116 & 0x7ffffffffffff;
- uint128_t x122 = x120 + x117;
- uint64_t x123 = (uint64_t) (x122 >> 0x33);
- uint64_t x124 = (uint64_t) x122 & 0x7ffffffffffff;
- uint128_t x125 = x123 + x118;
- uint64_t x126 = (uint64_t) (x125 >> 0x33);
- uint64_t x127 = (uint64_t) x125 & 0x7ffffffffffff;
- uint128_t x128 = x126 + x119;
- uint64_t x129 = (uint64_t) (x128 >> 0x33);
- uint64_t x130 = (uint64_t) x128 & 0x7ffffffffffff;
- uint128_t x131 = x129 + x111;
- uint64_t x132 = (uint64_t) (x131 >> 0x33);
- uint64_t x133 = (uint64_t) x131 & 0x7ffffffffffff;
- uint64_t x134 = x121 + 0x13 * x132;
- uint64_t x135 = x134 >> 0x33;
- uint64_t x136 = x134 & 0x7ffffffffffff;
- uint64_t x137 = x135 + x124;
- uint64_t x138 = x137 >> 0x33;
- uint64_t x139 = x137 & 0x7ffffffffffff;
- uint64_t x140 = x138 + x127;
- uint64_t x141 = x99 + x133;
- uint64_t x142 = x96 + x130;
- uint64_t x143 = x106 + x140;
- uint64_t x144 = x105 + x139;
- uint64_t x145 = x102 + x136;
- uint64_t x146 = 0xffffffffffffe + x99 - x133;
- uint64_t x147 = 0xffffffffffffe + x96 - x130;
- uint64_t x148 = 0xffffffffffffe + x106 - x140;
- uint64_t x149 = 0xffffffffffffe + x105 - x139;
- uint64_t x150 = 0xfffffffffffda + x102 - x136;
- uint64_t x151 = x145 * 0x2;
- uint64_t x152 = x144 * 0x2;
- uint64_t x153 = x143 * 0x2 * 0x13;
- uint64_t x154 = x141 * 0x13;
- uint64_t x155 = x154 * 0x2;
- uint128_t x156 = (uint128_t) x145 * x145 + (uint128_t) x155 * x144 + (uint128_t) x153 * x142;
- uint128_t x157 = (uint128_t) x151 * x144 + (uint128_t) x155 * x143 + (uint128_t) x142 * (x142 * 0x13);
- uint128_t x158 = (uint128_t) x151 * x143 + (uint128_t) x144 * x144 + (uint128_t) x155 * x142;
- uint128_t x159 = (uint128_t) x151 * x142 + (uint128_t) x152 * x143 + (uint128_t) x141 * x154;
- uint128_t x160 = (uint128_t) x151 * x141 + (uint128_t) x152 * x142 + (uint128_t) x143 * x143;
- uint64_t x161 = (uint64_t) (x156 >> 0x33);
- uint64_t x162 = (uint64_t) x156 & 0x7ffffffffffff;
- uint128_t x163 = x161 + x157;
- uint64_t x164 = (uint64_t) (x163 >> 0x33);
- uint64_t x165 = (uint64_t) x163 & 0x7ffffffffffff;
- uint128_t x166 = x164 + x158;
- uint64_t x167 = (uint64_t) (x166 >> 0x33);
- uint64_t x168 = (uint64_t) x166 & 0x7ffffffffffff;
- uint128_t x169 = x167 + x159;
- uint64_t x170 = (uint64_t) (x169 >> 0x33);
- uint64_t x171 = (uint64_t) x169 & 0x7ffffffffffff;
- uint128_t x172 = x170 + x160;
- uint64_t x173 = (uint64_t) (x172 >> 0x33);
- uint64_t x174 = (uint64_t) x172 & 0x7ffffffffffff;
- uint64_t x175 = x162 + 0x13 * x173;
- uint64_t x176 = x175 >> 0x33;
- uint64_t x177 = x175 & 0x7ffffffffffff;
- uint64_t x178 = x176 + x165;
- uint64_t x179 = x178 >> 0x33;
- uint64_t x180 = x178 & 0x7ffffffffffff;
- uint64_t x181 = x179 + x168;
- uint64_t x182 = x150 * 0x2;
- uint64_t x183 = x149 * 0x2;
- uint64_t x184 = x148 * 0x2 * 0x13;
- uint64_t x185 = x146 * 0x13;
- uint64_t x186 = x185 * 0x2;
- uint128_t x187 = (uint128_t) x150 * x150 + (uint128_t) x186 * x149 + (uint128_t) x184 * x147;
- uint128_t x188 = (uint128_t) x182 * x149 + (uint128_t) x186 * x148 + (uint128_t) x147 * (x147 * 0x13);
- uint128_t x189 = (uint128_t) x182 * x148 + (uint128_t) x149 * x149 + (uint128_t) x186 * x147;
- uint128_t x190 = (uint128_t) x182 * x147 + (uint128_t) x183 * x148 + (uint128_t) x146 * x185;
- uint128_t x191 = (uint128_t) x182 * x146 + (uint128_t) x183 * x147 + (uint128_t) x148 * x148;
- uint64_t x192 = (uint64_t) (x187 >> 0x33);
- uint64_t x193 = (uint64_t) x187 & 0x7ffffffffffff;
- uint128_t x194 = x192 + x188;
- uint64_t x195 = (uint64_t) (x194 >> 0x33);
- uint64_t x196 = (uint64_t) x194 & 0x7ffffffffffff;
- uint128_t x197 = x195 + x189;
- uint64_t x198 = (uint64_t) (x197 >> 0x33);
- uint64_t x199 = (uint64_t) x197 & 0x7ffffffffffff;
- uint128_t x200 = x198 + x190;
- uint64_t x201 = (uint64_t) (x200 >> 0x33);
- uint64_t x202 = (uint64_t) x200 & 0x7ffffffffffff;
- uint128_t x203 = x201 + x191;
- uint64_t x204 = (uint64_t) (x203 >> 0x33);
- uint64_t x205 = (uint64_t) x203 & 0x7ffffffffffff;
- uint64_t x206 = x193 + 0x13 * x204;
- uint64_t x207 = x206 >> 0x33;
- uint64_t x208 = x206 & 0x7ffffffffffff;
- uint64_t x209 = x207 + x196;
- uint64_t x210 = x209 >> 0x33;
- uint64_t x211 = x209 & 0x7ffffffffffff;
- uint64_t x212 = x210 + x199;
- uint128_t x213 = (uint128_t) x208 * x10;
- uint128_t x214 = (uint128_t) x208 * x12 + (uint128_t) x211 * x10;
- uint128_t x215 = (uint128_t) x208 * x14 + (uint128_t) x212 * x10 + (uint128_t) x211 * x12;
- uint128_t x216 = (uint128_t) x208 * x16 + (uint128_t) x202 * x10 + (uint128_t) x211 * x14 + (uint128_t) x212 * x12;
- uint128_t x217 = (uint128_t) x208 * x15 + (uint128_t) x205 * x10 + (uint128_t) x202 * x12 + (uint128_t) x211 * x16 + (uint128_t) x212 * x14;
- uint64_t x218 = x205 * 0x13;
- uint64_t x219 = x211 * 0x13;
- uint64_t x220 = x212 * 0x13;
- uint64_t x221 = x202 * 0x13;
- uint128_t x222 = x213 + (uint128_t) x218 * x12 + (uint128_t) x219 * x15 + (uint128_t) x220 * x16 + (uint128_t) x221 * x14;
- uint128_t x223 = x214 + (uint128_t) x218 * x14 + (uint128_t) x220 * x15 + (uint128_t) x221 * x16;
- uint128_t x224 = x215 + (uint128_t) x218 * x16 + (uint128_t) x221 * x15;
- uint128_t x225 = x216 + (uint128_t) x218 * x15;
- uint64_t x226 = (uint64_t) (x222 >> 0x33);
- uint64_t x227 = (uint64_t) x222 & 0x7ffffffffffff;
- uint128_t x228 = x226 + x223;
- uint64_t x229 = (uint64_t) (x228 >> 0x33);
- uint64_t x230 = (uint64_t) x228 & 0x7ffffffffffff;
- uint128_t x231 = x229 + x224;
- uint64_t x232 = (uint64_t) (x231 >> 0x33);
- uint64_t x233 = (uint64_t) x231 & 0x7ffffffffffff;
- uint128_t x234 = x232 + x225;
- uint64_t x235 = (uint64_t) (x234 >> 0x33);
- uint64_t x236 = (uint64_t) x234 & 0x7ffffffffffff;
- uint128_t x237 = x235 + x217;
- uint64_t x238 = (uint64_t) (x237 >> 0x33);
- uint64_t x239 = (uint64_t) x237 & 0x7ffffffffffff;
- uint64_t x240 = x227 + 0x13 * x238;
- uint64_t x241 = x240 >> 0x33;
- uint64_t x242 = x240 & 0x7ffffffffffff;
- uint64_t x243 = x241 + x230;
- uint64_t x244 = x243 >> 0x33;
- uint64_t x245 = x243 & 0x7ffffffffffff;
- uint64_t x246 = x244 + x233;
- uint64_t x247 = x57 * 0x2;
- uint64_t x248 = x56 * 0x2;
- uint64_t x249 = x55 * 0x2 * 0x13;
- uint64_t x250 = x53 * 0x13;
- uint64_t x251 = x250 * 0x2;
- uint128_t x252 = (uint128_t) x57 * x57 + (uint128_t) x251 * x56 + (uint128_t) x249 * x54;
- uint128_t x253 = (uint128_t) x247 * x56 + (uint128_t) x251 * x55 + (uint128_t) x54 * (x54 * 0x13);
- uint128_t x254 = (uint128_t) x247 * x55 + (uint128_t) x56 * x56 + (uint128_t) x251 * x54;
- uint128_t x255 = (uint128_t) x247 * x54 + (uint128_t) x248 * x55 + (uint128_t) x53 * x250;
- uint128_t x256 = (uint128_t) x247 * x53 + (uint128_t) x248 * x54 + (uint128_t) x55 * x55;
- uint64_t x257 = (uint64_t) (x252 >> 0x33);
- uint64_t x258 = (uint64_t) x252 & 0x7ffffffffffff;
- uint128_t x259 = x257 + x253;
- uint64_t x260 = (uint64_t) (x259 >> 0x33);
- uint64_t x261 = (uint64_t) x259 & 0x7ffffffffffff;
- uint128_t x262 = x260 + x254;
- uint64_t x263 = (uint64_t) (x262 >> 0x33);
- uint64_t x264 = (uint64_t) x262 & 0x7ffffffffffff;
- uint128_t x265 = x263 + x255;
- uint64_t x266 = (uint64_t) (x265 >> 0x33);
- uint64_t x267 = (uint64_t) x265 & 0x7ffffffffffff;
- uint128_t x268 = x266 + x256;
- uint64_t x269 = (uint64_t) (x268 >> 0x33);
- uint64_t x270 = (uint64_t) x268 & 0x7ffffffffffff;
- uint64_t x271 = x258 + 0x13 * x269;
- uint64_t x272 = x271 >> 0x33;
- uint64_t x273 = x271 & 0x7ffffffffffff;
- uint64_t x274 = x272 + x261;
- uint64_t x275 = x274 >> 0x33;
- uint64_t x276 = x274 & 0x7ffffffffffff;
- uint64_t x277 = x275 + x264;
- uint64_t x278 = x62 * 0x2;
- uint64_t x279 = x61 * 0x2;
- uint64_t x280 = x60 * 0x2 * 0x13;
- uint64_t x281 = x58 * 0x13;
- uint64_t x282 = x281 * 0x2;
- uint128_t x283 = (uint128_t) x62 * x62 + (uint128_t) x282 * x61 + (uint128_t) x280 * x59;
- uint128_t x284 = (uint128_t) x278 * x61 + (uint128_t) x282 * x60 + (uint128_t) x59 * (x59 * 0x13);
- uint128_t x285 = (uint128_t) x278 * x60 + (uint128_t) x61 * x61 + (uint128_t) x282 * x59;
- uint128_t x286 = (uint128_t) x278 * x59 + (uint128_t) x279 * x60 + (uint128_t) x58 * x281;
- uint128_t x287 = (uint128_t) x278 * x58 + (uint128_t) x279 * x59 + (uint128_t) x60 * x60;
- uint64_t x288 = (uint64_t) (x283 >> 0x33);
- uint64_t x289 = (uint64_t) x283 & 0x7ffffffffffff;
- uint128_t x290 = x288 + x284;
- uint64_t x291 = (uint64_t) (x290 >> 0x33);
- uint64_t x292 = (uint64_t) x290 & 0x7ffffffffffff;
- uint128_t x293 = x291 + x285;
- uint64_t x294 = (uint64_t) (x293 >> 0x33);
- uint64_t x295 = (uint64_t) x293 & 0x7ffffffffffff;
- uint128_t x296 = x294 + x286;
- uint64_t x297 = (uint64_t) (x296 >> 0x33);
- uint64_t x298 = (uint64_t) x296 & 0x7ffffffffffff;
- uint128_t x299 = x297 + x287;
- uint64_t x300 = (uint64_t) (x299 >> 0x33);
- uint64_t x301 = (uint64_t) x299 & 0x7ffffffffffff;
- uint64_t x302 = x289 + 0x13 * x300;
- uint64_t x303 = x302 >> 0x33;
- uint64_t x304 = x302 & 0x7ffffffffffff;
- uint64_t x305 = x303 + x292;
- uint64_t x306 = x305 >> 0x33;
- uint64_t x307 = x305 & 0x7ffffffffffff;
- uint64_t x308 = x306 + x295;
- uint128_t x309 = (uint128_t) x273 * x304;
- uint128_t x310 = (uint128_t) x273 * x307 + (uint128_t) x276 * x304;
- uint128_t x311 = (uint128_t) x273 * x308 + (uint128_t) x277 * x304 + (uint128_t) x276 * x307;
- uint128_t x312 = (uint128_t) x273 * x298 + (uint128_t) x267 * x304 + (uint128_t) x276 * x308 + (uint128_t) x277 * x307;
- uint128_t x313 = (uint128_t) x273 * x301 + (uint128_t) x270 * x304 + (uint128_t) x267 * x307 + (uint128_t) x276 * x298 + (uint128_t) x277 * x308;
- uint64_t x314 = x270 * 0x13;
- uint64_t x315 = x276 * 0x13;
- uint64_t x316 = x277 * 0x13;
- uint64_t x317 = x267 * 0x13;
- uint128_t x318 = x309 + (uint128_t) x314 * x307 + (uint128_t) x315 * x301 + (uint128_t) x316 * x298 + (uint128_t) x317 * x308;
- uint128_t x319 = x310 + (uint128_t) x314 * x308 + (uint128_t) x316 * x301 + (uint128_t) x317 * x298;
- uint128_t x320 = x311 + (uint128_t) x314 * x298 + (uint128_t) x317 * x301;
- uint128_t x321 = x312 + (uint128_t) x314 * x301;
- uint64_t x322 = (uint64_t) (x318 >> 0x33);
- uint64_t x323 = (uint64_t) x318 & 0x7ffffffffffff;
- uint128_t x324 = x322 + x319;
- uint64_t x325 = (uint64_t) (x324 >> 0x33);
- uint64_t x326 = (uint64_t) x324 & 0x7ffffffffffff;
- uint128_t x327 = x325 + x320;
- uint64_t x328 = (uint64_t) (x327 >> 0x33);
- uint64_t x329 = (uint64_t) x327 & 0x7ffffffffffff;
- uint128_t x330 = x328 + x321;
- uint64_t x331 = (uint64_t) (x330 >> 0x33);
- uint64_t x332 = (uint64_t) x330 & 0x7ffffffffffff;
- uint128_t x333 = x331 + x313;
- uint64_t x334 = (uint64_t) (x333 >> 0x33);
- uint64_t x335 = (uint64_t) x333 & 0x7ffffffffffff;
- uint64_t x336 = x323 + 0x13 * x334;
- uint64_t x337 = x336 >> 0x33;
- uint64_t x338 = x336 & 0x7ffffffffffff;
- uint64_t x339 = x337 + x326;
- uint64_t x340 = x339 >> 0x33;
- uint64_t x341 = x339 & 0x7ffffffffffff;
- uint64_t x342 = x340 + x329;
- uint64_t x343 = 0xffffffffffffe + x270 - x301;
- uint64_t x344 = 0xffffffffffffe + x267 - x298;
- uint64_t x345 = 0xffffffffffffe + x277 - x308;
- uint64_t x346 = 0xffffffffffffe + x276 - x307;
- uint64_t x347 = 0xfffffffffffda + x273 - x304;
- uint128_t x348 = (uint128_t) x347 * 0x1db41;
- uint128_t x349 = (uint128_t) x346 * 0x1db41;
- uint128_t x350 = (uint128_t) x345 * 0x1db41;
- uint128_t x351 = (uint128_t) x344 * 0x1db41;
- uint128_t x352 = (uint128_t) x343 * 0x1db41;
- uint64_t x353 = (uint64_t) (x348 >> 0x33);
- uint64_t x354 = (uint64_t) x348 & 0x7ffffffffffff;
- uint128_t x355 = x353 + x349;
- uint64_t x356 = (uint64_t) (x355 >> 0x33);
- uint64_t x357 = (uint64_t) x355 & 0x7ffffffffffff;
- uint128_t x358 = x356 + x350;
- uint64_t x359 = (uint64_t) (x358 >> 0x33);
- uint64_t x360 = (uint64_t) x358 & 0x7ffffffffffff;
- uint128_t x361 = x359 + x351;
- uint64_t x362 = (uint64_t) (x361 >> 0x33);
- uint64_t x363 = (uint64_t) x361 & 0x7ffffffffffff;
- uint128_t x364 = x362 + x352;
- uint64_t x365 = (uint64_t) (x364 >> 0x33);
- uint64_t x366 = (uint64_t) x364 & 0x7ffffffffffff;
- uint64_t x367 = x354 + 0x13 * x365;
- uint64_t x368 = x367 >> 0x33;
- uint64_t x369 = x367 & 0x7ffffffffffff;
- uint64_t x370 = x368 + x357;
- uint64_t x371 = x370 >> 0x33;
- uint64_t x372 = x370 & 0x7ffffffffffff;
- uint64_t x373 = x371 + x360;
- uint64_t x374 = x366 + x270;
- uint64_t x375 = x363 + x267;
- uint64_t x376 = x373 + x277;
- uint64_t x377 = x372 + x276;
- uint64_t x378 = x369 + x273;
- uint128_t x379 = (uint128_t) x347 * x378;
- uint128_t x380 = (uint128_t) x347 * x377 + (uint128_t) x346 * x378;
- uint128_t x381 = (uint128_t) x347 * x376 + (uint128_t) x345 * x378 + (uint128_t) x346 * x377;
- uint128_t x382 = (uint128_t) x347 * x375 + (uint128_t) x344 * x378 + (uint128_t) x346 * x376 + (uint128_t) x345 * x377;
- uint128_t x383 = (uint128_t) x347 * x374 + (uint128_t) x343 * x378 + (uint128_t) x344 * x377 + (uint128_t) x346 * x375 + (uint128_t) x345 * x376;
- uint64_t x384 = x343 * 0x13;
- uint64_t x385 = x346 * 0x13;
- uint64_t x386 = x345 * 0x13;
- uint64_t x387 = x344 * 0x13;
- uint128_t x388 = x379 + (uint128_t) x384 * x377 + (uint128_t) x385 * x374 + (uint128_t) x386 * x375 + (uint128_t) x387 * x376;
- uint128_t x389 = x380 + (uint128_t) x384 * x376 + (uint128_t) x386 * x374 + (uint128_t) x387 * x375;
- uint128_t x390 = x381 + (uint128_t) x384 * x375 + (uint128_t) x387 * x374;
- uint128_t x391 = x382 + (uint128_t) x384 * x374;
- uint64_t x392 = (uint64_t) (x388 >> 0x33);
- uint64_t x393 = (uint64_t) x388 & 0x7ffffffffffff;
- uint128_t x394 = x392 + x389;
- uint64_t x395 = (uint64_t) (x394 >> 0x33);
- uint64_t x396 = (uint64_t) x394 & 0x7ffffffffffff;
- uint128_t x397 = x395 + x390;
- uint64_t x398 = (uint64_t) (x397 >> 0x33);
- uint64_t x399 = (uint64_t) x397 & 0x7ffffffffffff;
- uint128_t x400 = x398 + x391;
- uint64_t x401 = (uint64_t) (x400 >> 0x33);
- uint64_t x402 = (uint64_t) x400 & 0x7ffffffffffff;
- uint128_t x403 = x401 + x383;
- uint64_t x404 = (uint64_t) (x403 >> 0x33);
- uint64_t x405 = (uint64_t) x403 & 0x7ffffffffffff;
- uint64_t x406 = x393 + 0x13 * x404;
- uint64_t x407 = x406 >> 0x33;
- uint64_t x408 = x406 & 0x7ffffffffffff;
- uint64_t x409 = x407 + x396;
- uint64_t x410 = x409 >> 0x33;
- uint64_t x411 = x409 & 0x7ffffffffffff;
- uint64_t x412 = x410 + x399;
- return (Return x335, Return x332, Return x342, Return x341, Return x338, (Return x405, Return x402, Return x412, Return x411, Return x408), (Return x174, Return x171, Return x181, Return x180, Return x177, (Return x239, Return x236, Return x246, Return x245, Return x242))))
-(x, (x0, x1), (x2, x3))%core in
-(let (a0, b0) := a in
-(a0, b0), let (a0, b0) := b in
-(a0, b0))%core
- : word64 * word64 * word64 * word64 * word64 → word64 * word64 * word64 * word64 * word64 → word64 * word64 * word64 * word64 * word64 → word64 * word64 * word64 * word64 * word64 → word64 * word64 * word64 * word64 * word64 → word64 * word64 * word64 * word64 * word64 * (word64 * word64 * word64 * word64 * word64) * (word64 * word64 * word64 * word64 * word64 * (word64 * word64 * word64 * word64 * word64))
diff --git a/src/Specific/IntegrationTestMulDisplay.log b/src/Specific/IntegrationTestMulDisplay.log
deleted file mode 100644
index e49d7c117..000000000
--- a/src/Specific/IntegrationTestMulDisplay.log
+++ /dev/null
@@ -1,40 +0,0 @@
-λ x x0 : word64 * word64 * word64 * word64 * word64,
-Interp-η
-(λ var : Syntax.base_type → Type,
- λ '(x10, x11, x9, x7, x5, (x18, x19, x17, x15, x13))%core,
- uint128_t x20 = (uint128_t) x5 * x13;
- uint128_t x21 = (uint128_t) x5 * x15 + (uint128_t) x7 * x13;
- uint128_t x22 = (uint128_t) x5 * x17 + (uint128_t) x9 * x13 + (uint128_t) x7 * x15;
- uint128_t x23 = (uint128_t) x5 * x19 + (uint128_t) x11 * x13 + (uint128_t) x7 * x17 + (uint128_t) x9 * x15;
- uint128_t x24 = (uint128_t) x5 * x18 + (uint128_t) x10 * x13 + (uint128_t) x11 * x15 + (uint128_t) x7 * x19 + (uint128_t) x9 * x17;
- uint64_t x25 = x10 * 0x13;
- uint64_t x26 = x7 * 0x13;
- uint64_t x27 = x9 * 0x13;
- uint64_t x28 = x11 * 0x13;
- uint128_t x29 = x20 + (uint128_t) x25 * x15 + (uint128_t) x26 * x18 + (uint128_t) x27 * x19 + (uint128_t) x28 * x17;
- uint128_t x30 = x21 + (uint128_t) x25 * x17 + (uint128_t) x27 * x18 + (uint128_t) x28 * x19;
- uint128_t x31 = x22 + (uint128_t) x25 * x19 + (uint128_t) x28 * x18;
- uint128_t x32 = x23 + (uint128_t) x25 * x18;
- uint64_t x33 = (uint64_t) (x29 >> 0x33);
- uint64_t x34 = (uint64_t) x29 & 0x7ffffffffffff;
- uint128_t x35 = x33 + x30;
- uint64_t x36 = (uint64_t) (x35 >> 0x33);
- uint64_t x37 = (uint64_t) x35 & 0x7ffffffffffff;
- uint128_t x38 = x36 + x31;
- uint64_t x39 = (uint64_t) (x38 >> 0x33);
- uint64_t x40 = (uint64_t) x38 & 0x7ffffffffffff;
- uint128_t x41 = x39 + x32;
- uint64_t x42 = (uint64_t) (x41 >> 0x33);
- uint64_t x43 = (uint64_t) x41 & 0x7ffffffffffff;
- uint128_t x44 = x42 + x24;
- uint64_t x45 = (uint64_t) (x44 >> 0x33);
- uint64_t x46 = (uint64_t) x44 & 0x7ffffffffffff;
- uint64_t x47 = x34 + 0x13 * x45;
- uint64_t x48 = x47 >> 0x33;
- uint64_t x49 = x47 & 0x7ffffffffffff;
- uint64_t x50 = x48 + x37;
- uint64_t x51 = x50 >> 0x33;
- uint64_t x52 = x50 & 0x7ffffffffffff;
- return (Return x46, Return x43, x51 + x40, Return x52, Return x49))
-(x, x0)%core
- : word64 * word64 * word64 * word64 * word64 → word64 * word64 * word64 * word64 * word64 → ReturnType (uint64_t * uint64_t * uint64_t * uint64_t * uint64_t)
diff --git a/src/Specific/IntegrationTestSquareDisplay.log b/src/Specific/IntegrationTestSquareDisplay.log
deleted file mode 100644
index 006b83cd9..000000000
--- a/src/Specific/IntegrationTestSquareDisplay.log
+++ /dev/null
@@ -1,37 +0,0 @@
-λ x : word64 * word64 * word64 * word64 * word64,
-Interp-η
-(λ var : Syntax.base_type → Type,
- λ '(x7, x8, x6, x4, x2)%core,
- uint64_t x9 = x2 * 0x2;
- uint64_t x10 = x4 * 0x2;
- uint64_t x11 = x6 * 0x2 * 0x13;
- uint64_t x12 = x7 * 0x13;
- uint64_t x13 = x12 * 0x2;
- uint128_t x14 = (uint128_t) x2 * x2 + (uint128_t) x13 * x4 + (uint128_t) x11 * x8;
- uint128_t x15 = (uint128_t) x9 * x4 + (uint128_t) x13 * x6 + (uint128_t) x8 * (x8 * 0x13);
- uint128_t x16 = (uint128_t) x9 * x6 + (uint128_t) x4 * x4 + (uint128_t) x13 * x8;
- uint128_t x17 = (uint128_t) x9 * x8 + (uint128_t) x10 * x6 + (uint128_t) x7 * x12;
- uint128_t x18 = (uint128_t) x9 * x7 + (uint128_t) x10 * x8 + (uint128_t) x6 * x6;
- uint64_t x19 = (uint64_t) (x14 >> 0x33);
- uint64_t x20 = (uint64_t) x14 & 0x7ffffffffffff;
- uint128_t x21 = x19 + x15;
- uint64_t x22 = (uint64_t) (x21 >> 0x33);
- uint64_t x23 = (uint64_t) x21 & 0x7ffffffffffff;
- uint128_t x24 = x22 + x16;
- uint64_t x25 = (uint64_t) (x24 >> 0x33);
- uint64_t x26 = (uint64_t) x24 & 0x7ffffffffffff;
- uint128_t x27 = x25 + x17;
- uint64_t x28 = (uint64_t) (x27 >> 0x33);
- uint64_t x29 = (uint64_t) x27 & 0x7ffffffffffff;
- uint128_t x30 = x28 + x18;
- uint64_t x31 = (uint64_t) (x30 >> 0x33);
- uint64_t x32 = (uint64_t) x30 & 0x7ffffffffffff;
- uint64_t x33 = x20 + 0x13 * x31;
- uint64_t x34 = x33 >> 0x33;
- uint64_t x35 = x33 & 0x7ffffffffffff;
- uint64_t x36 = x34 + x23;
- uint64_t x37 = x36 >> 0x33;
- uint64_t x38 = x36 & 0x7ffffffffffff;
- return (Return x32, Return x29, x37 + x26, Return x38, Return x35))
-x
- : word64 * word64 * word64 * word64 * word64 → ReturnType (uint64_t * uint64_t * uint64_t * uint64_t * uint64_t)
diff --git a/src/Specific/X25519/C64/compiler.sh b/src/Specific/X25519/C64/compiler.sh
new file mode 100755
index 000000000..eb6c09eba
--- /dev/null
+++ b/src/Specific/X25519/C64/compiler.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -euo pipefail
+
+gcc -march=native -mtune=native -O3 -flto -fomit-frame-pointer -fwrapv -Wno-attributes $@
diff --git a/src/Specific/IntegrationTestMul.v b/src/Specific/X25519/C64/femul.v
index 01d629bfb..01d629bfb 100644
--- a/src/Specific/IntegrationTestMul.v
+++ b/src/Specific/X25519/C64/femul.v
diff --git a/src/Specific/IntegrationTestMulDisplay.v b/src/Specific/X25519/C64/femulDisplay.v
index bf6a3e1c4..35c557ce5 100644
--- a/src/Specific/IntegrationTestMulDisplay.v
+++ b/src/Specific/X25519/C64/femulDisplay.v
@@ -1,4 +1,4 @@
-Require Import Crypto.Specific.IntegrationTestMul.
+Require Import Crypto.Specific.X25519.C64.femul.
Require Import Crypto.Specific.IntegrationTestDisplayCommon.
Check display mul.
diff --git a/src/Specific/IntegrationTestSquare.v b/src/Specific/X25519/C64/fesquare.v
index d7d717c61..d7d717c61 100644
--- a/src/Specific/IntegrationTestSquare.v
+++ b/src/Specific/X25519/C64/fesquare.v
diff --git a/src/Specific/IntegrationTestSquareDisplay.v b/src/Specific/X25519/C64/fesquareDisplay.v
index bd5b9aaad..f39710fec 100644
--- a/src/Specific/IntegrationTestSquareDisplay.v
+++ b/src/Specific/X25519/C64/fesquareDisplay.v
@@ -1,4 +1,4 @@
-Require Import Crypto.Specific.IntegrationTestSquare.
+Require Import Crypto.Specific.X25519.C64.fesquare.
Require Import Crypto.Specific.IntegrationTestDisplayCommon.
Check display square.
diff --git a/src/Specific/IntegrationTestLadderstep.v b/src/Specific/X25519/C64/ladderstep.v
index b88370a48..b88370a48 100644
--- a/src/Specific/IntegrationTestLadderstep.v
+++ b/src/Specific/X25519/C64/ladderstep.v
diff --git a/src/Specific/IntegrationTestLadderstepDisplay.v b/src/Specific/X25519/C64/ladderstepDisplay.v
index d3bc2f836..73aab21ab 100644
--- a/src/Specific/IntegrationTestLadderstepDisplay.v
+++ b/src/Specific/X25519/C64/ladderstepDisplay.v
@@ -1,4 +1,4 @@
-Require Import Crypto.Specific.IntegrationTestLadderstep.
+Require Import Crypto.Specific.X25519.C64.ladderstep.
Require Import Crypto.Specific.IntegrationTestDisplayCommon.
Check display xzladderstep.
diff --git a/src/Specific/X25519/C64/scalarmult.c b/src/Specific/X25519/C64/scalarmult.c
new file mode 100644
index 000000000..ba0d6338d
--- /dev/null
+++ b/src/Specific/X25519/C64/scalarmult.c
@@ -0,0 +1,309 @@
+// The synthesized parts are from fiat-crypto, copyright MIT 2017.
+// The synthesis framework is released under the MIT license.
+// The non-synthesized parts are from curve25519-donna by Adam Langley (Google):
+/* Copyright 2008, Google Inc.
+ * All rights reserved.
+ *
+ * Code released into the public domain.
+ *
+ * curve25519-donna: Curve25519 elliptic curve, public key function
+ *
+ * http://code.google.com/p/curve25519-donna/
+ *
+ * (modified by Andres Erbsen)
+ * Adam Langley <agl@imperialviolet.org>
+ * Parts optimised by floodyberry
+ * Derived from public domain C code by Daniel J. Bernstein <djb@cr.yp.to>
+ *
+ * More information about curve25519 can be found here
+ * http://cr.yp.to/ecdh.html
+ *
+ * djb's sample implementation of curve25519 is written in a special assembly
+ * language called qhasm and uses the floating point registers.
+ *
+ * This is, almost, a clean room reimplementation from the curve25519 paper. It
+ * uses many of the tricks described therein. Only the crecip function is taken
+ * from the sample implementation.
+ */
+
+#include <string.h>
+#include <stdint.h>
+
+#include "femul.h"
+#include "fesquare.h"
+#include "ladderstep.h"
+
+typedef unsigned int uint128_t __attribute__((mode(TI)));
+
+#undef force_inline
+#define force_inline __attribute__((always_inline))
+
+typedef uint8_t u8;
+typedef uint64_t limb;
+typedef limb felem[5];
+
+static void force_inline
+fmul(felem output, const felem in2, const felem in) {
+ uint64_t out[5];
+ femul(out,
+ in2[4], in2[3], in2[2], in2[1], in2[0],
+ in[4], in[3], in[2], in[1], in[0]);
+ output[4] = out[0];
+ output[3] = out[1];
+ output[2] = out[2];
+ output[1] = out[3];
+ output[0] = out[4];
+}
+
+static void force_inline
+fsquare_times(felem output, const felem in, limb count) {
+ uint64_t r0 = in[0];
+ uint64_t r1 = in[1];
+ uint64_t r2 = in[2];
+ uint64_t r3 = in[3];
+ uint64_t r4 = in[4];
+
+ do {
+ uint64_t out[5];
+ fesquare(out, r4, r3, r2, r1, r0);
+ r4 = out[0];
+ r3 = out[1];
+ r2 = out[2];
+ r1 = out[3];
+ r0 = out[4];
+ } while(--count);
+
+ output[0] = r0;
+ output[1] = r1;
+ output[2] = r2;
+ output[3] = r3;
+ output[4] = r4;
+}
+
+/* Take a little-endian, 32-byte number and expand it into polynomial form */
+static void
+fexpand(limb *output, const u8 *in) {
+ output[0] = *((const uint64_t *)(in)) & 0x7ffffffffffff;
+ output[1] = (*((const uint64_t *)(in+6)) >> 3) & 0x7ffffffffffff;
+ output[2] = (*((const uint64_t *)(in+12)) >> 6) & 0x7ffffffffffff;
+ output[3] = (*((const uint64_t *)(in+19)) >> 1) & 0x7ffffffffffff;
+ output[4] = (*((const uint64_t *)(in+25)) >> 4) & 0x7ffffffffffff;
+}
+
+/* Take a fully reduced polynomial form number and contract it into a
+ * little-endian, 32-byte array
+ */
+static void
+fcontract(u8 *output, const felem input) {
+ uint128_t t[5];
+
+ t[0] = input[0];
+ t[1] = input[1];
+ t[2] = input[2];
+ t[3] = input[3];
+ t[4] = input[4];
+
+ t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffff;
+ t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffff;
+ t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffff;
+ t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffff;
+ t[0] += 19 * (t[4] >> 51); t[4] &= 0x7ffffffffffff;
+
+ t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffff;
+ t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffff;
+ t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffff;
+ t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffff;
+ t[0] += 19 * (t[4] >> 51); t[4] &= 0x7ffffffffffff;
+
+ /* now t is between 0 and 2^255-1, properly carried. */
+ /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */
+
+ t[0] += 19;
+
+ t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffff;
+ t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffff;
+ t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffff;
+ t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffff;
+ t[0] += 19 * (t[4] >> 51); t[4] &= 0x7ffffffffffff;
+
+ /* now between 19 and 2^255-1 in both cases, and offset by 19. */
+
+ t[0] += 0x8000000000000 - 19;
+ t[1] += 0x8000000000000 - 1;
+ t[2] += 0x8000000000000 - 1;
+ t[3] += 0x8000000000000 - 1;
+ t[4] += 0x8000000000000 - 1;
+
+ /* now between 2^255 and 2^256-20, and offset by 2^255. */
+
+ t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffff;
+ t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffff;
+ t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffff;
+ t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffff;
+ t[4] &= 0x7ffffffffffff;
+
+ *((uint64_t *)(output)) = t[0] | (t[1] << 51);
+ *((uint64_t *)(output+8)) = (t[1] >> 13) | (t[2] << 38);
+ *((uint64_t *)(output+16)) = (t[2] >> 26) | (t[3] << 25);
+ *((uint64_t *)(output+24)) = (t[3] >> 39) | (t[4] << 12);
+}
+
+/* Input: Q, Q', Q-Q'
+ * Output: 2Q, Q+Q'
+ */
+static void
+fmonty(limb *x2, limb *z2, /* output 2Q */
+ limb *x3, limb *z3, /* output Q + Q' */
+ limb *x, limb *z, /* input Q */
+ limb *xprime, limb *zprime, /* input Q' */
+ const limb *qmqp /* input Q - Q' */) {
+ uint64_t out[20];
+ ladderstep(out, qmqp[4], qmqp[3], qmqp[2], qmqp[1], qmqp[0], x[4], x[3], x[2], x[1], x[0], z[4], z[3], z[2], z[1], z[0], xprime[4], xprime[3], xprime[2], xprime[1], xprime[0], zprime[4], zprime[3], zprime[2], zprime[1], zprime[0]);
+ x2[4] = out[ 0];
+ x2[3] = out[ 1];
+ x2[2] = out[ 2];
+ x2[1] = out[ 3];
+ x2[0] = out[ 4];
+ z2[4] = out[ 5];
+ z2[3] = out[ 6];
+ z2[2] = out[ 7];
+ z2[1] = out[ 8];
+ z2[0] = out[ 9];
+ x3[4] = out[10];
+ x3[3] = out[11];
+ x3[2] = out[12];
+ x3[1] = out[13];
+ x3[0] = out[14];
+ z3[4] = out[15];
+ z3[3] = out[16];
+ z3[2] = out[17];
+ z3[1] = out[18];
+ z3[0] = out[19];
+}
+
+// -----------------------------------------------------------------------------
+// Maybe swap the contents of two limb arrays (@a and @b), each @len elements
+// long. Perform the swap iff @swap is non-zero.
+//
+// This function performs the swap without leaking any side-channel
+// information.
+// -----------------------------------------------------------------------------
+static void
+swap_conditional(limb a[5], limb b[5], limb iswap) {
+ unsigned i;
+ const limb swap = -iswap;
+
+ for (i = 0; i < 5; ++i) {
+ const limb x = swap & (a[i] ^ b[i]);
+ a[i] ^= x;
+ b[i] ^= x;
+ }
+}
+
+/* Calculates nQ where Q is the x-coordinate of a point on the curve
+ *
+ * resultx/resultz: the x coordinate of the resulting curve point (short form)
+ * n: a little endian, 32-byte number
+ * q: a point of the curve (short form)
+ */
+static void
+cmult(limb *resultx, limb *resultz, const u8 *n, const limb *q) {
+ limb a[5] = {0}, b[5] = {1}, c[5] = {1}, d[5] = {0};
+ limb *nqpqx = a, *nqpqz = b, *nqx = c, *nqz = d, *t;
+ limb e[5] = {0}, f[5] = {1}, g[5] = {0}, h[5] = {1};
+ limb *nqpqx2 = e, *nqpqz2 = f, *nqx2 = g, *nqz2 = h;
+
+ unsigned i, j;
+
+ memcpy(nqpqx, q, sizeof(limb) * 5);
+
+ for (i = 0; i < 32; ++i) {
+ u8 byte = n[31 - i];
+ for (j = 0; j < 8; ++j) {
+ const limb bit = byte >> 7;
+
+ swap_conditional(nqx, nqpqx, bit);
+ swap_conditional(nqz, nqpqz, bit);
+ fmonty(nqx2, nqz2,
+ nqpqx2, nqpqz2,
+ nqx, nqz,
+ nqpqx, nqpqz,
+ q);
+ swap_conditional(nqx2, nqpqx2, bit);
+ swap_conditional(nqz2, nqpqz2, bit);
+
+ t = nqx;
+ nqx = nqx2;
+ nqx2 = t;
+ t = nqz;
+ nqz = nqz2;
+ nqz2 = t;
+ t = nqpqx;
+ nqpqx = nqpqx2;
+ nqpqx2 = t;
+ t = nqpqz;
+ nqpqz = nqpqz2;
+ nqpqz2 = t;
+
+ byte <<= 1;
+ }
+ }
+
+ memcpy(resultx, nqx, sizeof(limb) * 5);
+ memcpy(resultz, nqz, sizeof(limb) * 5);
+}
+
+
+// -----------------------------------------------------------------------------
+// Shamelessly copied from djb's code, tightened a little
+// -----------------------------------------------------------------------------
+static void
+crecip(felem out, const felem z) {
+ felem a,t0,b,c;
+
+ /* 2 */ fsquare_times(a, z, 1); // a = 2
+ /* 8 */ fsquare_times(t0, a, 2);
+ /* 9 */ fmul(b, t0, z); // b = 9
+ /* 11 */ fmul(a, b, a); // a = 11
+ /* 22 */ fsquare_times(t0, a, 1);
+ /* 2^5 - 2^0 = 31 */ fmul(b, t0, b);
+ /* 2^10 - 2^5 */ fsquare_times(t0, b, 5);
+ /* 2^10 - 2^0 */ fmul(b, t0, b);
+ /* 2^20 - 2^10 */ fsquare_times(t0, b, 10);
+ /* 2^20 - 2^0 */ fmul(c, t0, b);
+ /* 2^40 - 2^20 */ fsquare_times(t0, c, 20);
+ /* 2^40 - 2^0 */ fmul(t0, t0, c);
+ /* 2^50 - 2^10 */ fsquare_times(t0, t0, 10);
+ /* 2^50 - 2^0 */ fmul(b, t0, b);
+ /* 2^100 - 2^50 */ fsquare_times(t0, b, 50);
+ /* 2^100 - 2^0 */ fmul(c, t0, b);
+ /* 2^200 - 2^100 */ fsquare_times(t0, c, 100);
+ /* 2^200 - 2^0 */ fmul(t0, t0, c);
+ /* 2^250 - 2^50 */ fsquare_times(t0, t0, 50);
+ /* 2^250 - 2^0 */ fmul(t0, t0, b);
+ /* 2^255 - 2^5 */ fsquare_times(t0, t0, 5);
+ /* 2^255 - 21 */ fmul(out, t0, a);
+}
+
+int
+crypto_scalarmult(u8 *mypublic, const u8 *secret, const u8 *basepoint) {
+ limb bp[5], x[5], z[5], zmone[5];
+ uint8_t e[32];
+ int i;
+
+ for (i = 0;i < 32;++i) e[i] = secret[i];
+ e[0] &= 248;
+ e[31] &= 127;
+ e[31] |= 64;
+
+ fexpand(bp, basepoint);
+ cmult(x, z, e, bp);
+ crecip(zmone, z);
+ fmul(z, x, zmone);
+ fcontract(mypublic, z);
+ return 0;
+}
+
+void crypto_scalarmult_bench(unsigned char* buf) {
+ crypto_scalarmult(buf, buf+32, buf+64);
+}
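crypto_scalarmult_bench reuses the single 1024-byte buffer from measure.c as output (bytes 0..31), secret scalar (bytes 32..63), and base point (bytes 64..95). A standalone caller would look roughly like the following hypothetical driver, which is not part of this commit (basepoint = {9, 0, ..., 0} is the standard Curve25519 base point; the prototype is assumed, since this file ships no header):

    #include <stdint.h>

    int crypto_scalarmult(unsigned char *mypublic, const unsigned char *secret,
                          const unsigned char *basepoint);

    int main(void) {
      unsigned char secret[32] = {1};      /* example scalar; clamped inside crypto_scalarmult */
      unsigned char basepoint[32] = {9};   /* x-coordinate 9: the Curve25519 generator */
      unsigned char pub[32];
      crypto_scalarmult(pub, secret, basepoint);
      return 0;
    }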