diff --git a/conf/modules/haclc.xml b/conf/modules/haclc.xml
new file mode 100644
index 0000000000..350062b40f
--- /dev/null
+++ b/conf/modules/haclc.xml
@@ -0,0 +1,25 @@
+
+
+
+
+
+ HACL* verified cryptographic library
+ see https://github.com/mitls/hacl-star for details
+
+
+
+
+
+
+
+
+
+
+
+
+ # do not use 128-bit arithmetic (KRML_NOUINT128 selects the 64-bit fallback)
+ ap.CFLAGS += -DKRML_NOUINT128
+
+
+
+
diff --git a/conf/modules/telemetry_transparent_secure.xml b/conf/modules/telemetry_transparent_secure.xml
new file mode 100644
index 0000000000..bd77d1e1a8
--- /dev/null
+++ b/conf/modules/telemetry_transparent_secure.xml
@@ -0,0 +1,49 @@
+
+
+
+
+
+ Telemetry using secure PPRZ protocol over UART
+ Similar to telemetry_transparent, except it uses message scheduling and encryption
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/sw/airborne/modules/datalink/hacl-c/AEAD_Poly1305_64.c b/sw/airborne/modules/datalink/hacl-c/AEAD_Poly1305_64.c
new file mode 100644
index 0000000000..183a3a1a61
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/AEAD_Poly1305_64.c
@@ -0,0 +1,475 @@
+#include "AEAD_Poly1305_64.h"
+
+inline static void Hacl_Bignum_Modulo_reduce(uint64_t *b)
+{
+ uint64_t b0 = b[0];
+ b[0] = (b0 << (uint32_t )4) + (b0 << (uint32_t )2);
+}
+
+inline static void Hacl_Bignum_Modulo_carry_top(uint64_t *b)
+{
+ uint64_t b2 = b[2];
+ uint64_t b0 = b[0];
+ uint64_t b2_42 = b2 >> (uint32_t )42;
+ b[2] = b2 & (uint64_t )0x3ffffffffff;
+ b[0] = (b2_42 << (uint32_t )2) + b2_42 + b0;
+}
+
+inline static void Hacl_Bignum_Modulo_carry_top_wide(FStar_UInt128_t *b)
+{
+ FStar_UInt128_t b2 = b[2];
+ FStar_UInt128_t b0 = b[0];
+ FStar_UInt128_t
+ b2_ = FStar_UInt128_logand(b2, FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0x3ffffffffff));
+ uint64_t
+ b2_42 = FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(b2, (uint32_t )42));
+ FStar_UInt128_t
+ b0_ =
+ FStar_UInt128_add(b0,
+ FStar_Int_Cast_Full_uint64_to_uint128((b2_42 << (uint32_t )2) + b2_42));
+ b[2] = b2_;
+ b[0] = b0_;
+}
+
+inline static void
+Hacl_Bignum_Fproduct_copy_from_wide_(uint64_t *output, FStar_UInt128_t *input)
+{
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )3; i = i + (uint32_t )1)
+ {
+ FStar_UInt128_t uu____429 = input[i];
+ uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
+ output[i] = uu____428;
+ }
+}
+
+inline static void Hacl_Bignum_Fproduct_shift(uint64_t *output)
+{
+ uint64_t tmp = output[2];
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )2; i = i + (uint32_t )1)
+ {
+ uint32_t ctr = (uint32_t )3 - i - (uint32_t )1;
+ uint64_t z = output[ctr - (uint32_t )1];
+ output[ctr] = z;
+ }
+ output[0] = tmp;
+}
+
+inline static void
+Hacl_Bignum_Fproduct_sum_scalar_multiplication_(
+ FStar_UInt128_t *output,
+ uint64_t *input,
+ uint64_t s
+)
+{
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )3; i = i + (uint32_t )1)
+ {
+ FStar_UInt128_t uu____871 = output[i];
+ uint64_t uu____874 = input[i];
+ FStar_UInt128_t
+ uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
+ output[i] = uu____870;
+ }
+}
+
+inline static void Hacl_Bignum_Fproduct_carry_wide_(FStar_UInt128_t *tmp)
+{
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )2; i = i + (uint32_t )1)
+ {
+ uint32_t ctr = i;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t )1];
+ uint64_t
+ r0 =
+ FStar_Int_Cast_Full_uint128_to_uint64(tctr)
+ & (((uint64_t )1 << (uint32_t )44) - (uint64_t )1);
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t )44);
+ tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t )1] = FStar_UInt128_add(tctrp1, c);
+ }
+}
+
+inline static void Hacl_Bignum_Fproduct_carry_limb_(uint64_t *tmp)
+{
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )2; i = i + (uint32_t )1)
+ {
+ uint32_t ctr = i;
+ uint64_t tctr = tmp[ctr];
+ uint64_t tctrp1 = tmp[ctr + (uint32_t )1];
+ uint64_t r0 = tctr & (((uint64_t )1 << (uint32_t )44) - (uint64_t )1);
+ uint64_t c = tctr >> (uint32_t )44;
+ tmp[ctr] = r0;
+ tmp[ctr + (uint32_t )1] = tctrp1 + c;
+ }
+}
+
+inline static void Hacl_Bignum_Fmul_shift_reduce(uint64_t *output)
+{
+ Hacl_Bignum_Fproduct_shift(output);
+ Hacl_Bignum_Modulo_reduce(output);
+}
+
+static void
+Hacl_Bignum_Fmul_mul_shift_reduce_(FStar_UInt128_t *output, uint64_t *input, uint64_t *input2)
+{
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )2; i = i + (uint32_t )1)
+ {
+ uint64_t input2i = input2[i];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ uint32_t i = (uint32_t )2;
+ uint64_t input2i = input2[i];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+}
+
+inline static void Hacl_Bignum_Fmul_fmul_(uint64_t *output, uint64_t *input, uint64_t *input2)
+{
+ KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0), (uint32_t )3);
+ FStar_UInt128_t t[3];
+ for (uintmax_t _i = 0; _i < (uint32_t )3; ++_i)
+ t[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
+ Hacl_Bignum_Fmul_mul_shift_reduce_(t, input, input2);
+ Hacl_Bignum_Fproduct_carry_wide_(t);
+ Hacl_Bignum_Modulo_carry_top_wide(t);
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, t);
+ uint64_t i0 = output[0];
+ uint64_t i1 = output[1];
+ uint64_t i0_ = i0 & (((uint64_t )1 << (uint32_t )44) - (uint64_t )1);
+ uint64_t i1_ = i1 + (i0 >> (uint32_t )44);
+ output[0] = i0_;
+ output[1] = i1_;
+}
+
+inline static void Hacl_Bignum_Fmul_fmul(uint64_t *output, uint64_t *input, uint64_t *input2)
+{
+ uint64_t tmp[3] = { 0 };
+ memcpy(tmp, input, (uint32_t )3 * sizeof input[0]);
+ Hacl_Bignum_Fmul_fmul_(output, tmp, input2);
+}
+
+inline static void
+Hacl_Bignum_AddAndMultiply_add_and_multiply(uint64_t *acc, uint64_t *block, uint64_t *r)
+{
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )3; i = i + (uint32_t )1)
+ {
+ uint64_t uu____871 = acc[i];
+ uint64_t uu____874 = block[i];
+ uint64_t uu____870 = uu____871 + uu____874;
+ acc[i] = uu____870;
+ }
+ Hacl_Bignum_Fmul_fmul(acc, acc, r);
+}
+
+inline static void
+Hacl_Impl_Poly1305_64_poly1305_update(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m
+)
+{
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut0 = st;
+ uint64_t *h = scrut0.h;
+ uint64_t *acc = h;
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *r = scrut.r;
+ uint64_t *r3 = r;
+ uint64_t tmp[3] = { 0 };
+ FStar_UInt128_t m0 = load128_le(m);
+ uint64_t r0 = FStar_Int_Cast_Full_uint128_to_uint64(m0) & (uint64_t )0xfffffffffff;
+ uint64_t
+ r1 =
+ FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(m0, (uint32_t )44))
+ & (uint64_t )0xfffffffffff;
+ uint64_t
+ r2 = FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(m0, (uint32_t )88));
+ tmp[0] = r0;
+ tmp[1] = r1;
+ tmp[2] = r2;
+ uint64_t b2 = tmp[2];
+ uint64_t b2_ = (uint64_t )0x10000000000 | b2;
+ tmp[2] = b2_;
+ Hacl_Bignum_AddAndMultiply_add_and_multiply(acc, tmp, r3);
+}
+
+inline static void
+Hacl_Impl_Poly1305_64_poly1305_process_last_block_(
+ uint8_t *block,
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint64_t rem_
+)
+{
+ uint64_t tmp[3] = { 0 };
+ FStar_UInt128_t m0 = load128_le(block);
+ uint64_t r0 = FStar_Int_Cast_Full_uint128_to_uint64(m0) & (uint64_t )0xfffffffffff;
+ uint64_t
+ r1 =
+ FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(m0, (uint32_t )44))
+ & (uint64_t )0xfffffffffff;
+ uint64_t
+ r2 = FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(m0, (uint32_t )88));
+ tmp[0] = r0;
+ tmp[1] = r1;
+ tmp[2] = r2;
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut0 = st;
+ uint64_t *h = scrut0.h;
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *r = scrut.r;
+ Hacl_Bignum_AddAndMultiply_add_and_multiply(h, tmp, r);
+}
+
+inline static void
+Hacl_Impl_Poly1305_64_poly1305_process_last_block(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint64_t rem_
+)
+{
+ uint8_t zero1 = (uint8_t )0;
+ KRML_CHECK_SIZE(zero1, (uint32_t )16);
+ uint8_t block[16];
+ for (uintmax_t _i = 0; _i < (uint32_t )16; ++_i)
+ block[_i] = zero1;
+ uint32_t i0 = (uint32_t )rem_;
+ uint32_t i = (uint32_t )rem_;
+ memcpy(block, m, i * sizeof m[0]);
+ block[i0] = (uint8_t )1;
+ Hacl_Impl_Poly1305_64_poly1305_process_last_block_(block, st, m, rem_);
+}
+
+static void Hacl_Impl_Poly1305_64_poly1305_last_pass(uint64_t *acc)
+{
+ Hacl_Bignum_Fproduct_carry_limb_(acc);
+ Hacl_Bignum_Modulo_carry_top(acc);
+ uint64_t a0 = acc[0];
+ uint64_t a10 = acc[1];
+ uint64_t a20 = acc[2];
+ uint64_t a0_ = a0 & (uint64_t )0xfffffffffff;
+ uint64_t r0 = a0 >> (uint32_t )44;
+ uint64_t a1_ = (a10 + r0) & (uint64_t )0xfffffffffff;
+ uint64_t r1 = (a10 + r0) >> (uint32_t )44;
+ uint64_t a2_ = a20 + r1;
+ acc[0] = a0_;
+ acc[1] = a1_;
+ acc[2] = a2_;
+ Hacl_Bignum_Modulo_carry_top(acc);
+ uint64_t i0 = acc[0];
+ uint64_t i1 = acc[1];
+ uint64_t i0_ = i0 & (((uint64_t )1 << (uint32_t )44) - (uint64_t )1);
+ uint64_t i1_ = i1 + (i0 >> (uint32_t )44);
+ acc[0] = i0_;
+ acc[1] = i1_;
+ uint64_t a00 = acc[0];
+ uint64_t a1 = acc[1];
+ uint64_t a2 = acc[2];
+ uint64_t mask0 = FStar_UInt64_gte_mask(a00, (uint64_t )0xffffffffffb);
+ uint64_t mask1 = FStar_UInt64_eq_mask(a1, (uint64_t )0xfffffffffff);
+ uint64_t mask2 = FStar_UInt64_eq_mask(a2, (uint64_t )0x3ffffffffff);
+ uint64_t mask = mask0 & mask1 & mask2;
+ uint64_t a0_0 = a00 - ((uint64_t )0xffffffffffb & mask);
+ uint64_t a1_0 = a1 - ((uint64_t )0xfffffffffff & mask);
+ uint64_t a2_0 = a2 - ((uint64_t )0x3ffffffffff & mask);
+ acc[0] = a0_0;
+ acc[1] = a1_0;
+ acc[2] = a2_0;
+}
+
+static Hacl_Impl_Poly1305_64_State_poly1305_state
+Hacl_Impl_Poly1305_64_mk_state(uint64_t *r, uint64_t *h)
+{
+ return ((Hacl_Impl_Poly1305_64_State_poly1305_state ){ .r = r, .h = h });
+}
+
+static void
+Hacl_Standalone_Poly1305_64_poly1305_blocks(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint64_t len1
+)
+{
+ if (len1 == (uint64_t )0)
+ {
+
+ }
+ else
+ {
+ uint8_t *block = m;
+ uint8_t *tail1 = m + (uint32_t )16;
+ Hacl_Impl_Poly1305_64_poly1305_update(st, block);
+ uint64_t len2 = len1 - (uint64_t )1;
+ Hacl_Standalone_Poly1305_64_poly1305_blocks(st, tail1, len2);
+ }
+}
+
+static void
+Hacl_Standalone_Poly1305_64_poly1305_partial(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *input,
+ uint64_t len1,
+ uint8_t *kr
+)
+{
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *r = scrut.r;
+ uint64_t *x0 = r;
+ FStar_UInt128_t k1 = load128_le(kr);
+ FStar_UInt128_t
+ k_clamped =
+ FStar_UInt128_logand(k1,
+ FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0x0ffffffc0ffffffc),
+ (uint32_t )64),
+ FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0x0ffffffc0fffffff)));
+ uint64_t r0 = FStar_Int_Cast_Full_uint128_to_uint64(k_clamped) & (uint64_t )0xfffffffffff;
+ uint64_t
+ r1 =
+ FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(k_clamped, (uint32_t )44))
+ & (uint64_t )0xfffffffffff;
+ uint64_t
+ r2 = FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(k_clamped, (uint32_t )88));
+ x0[0] = r0;
+ x0[1] = r1;
+ x0[2] = r2;
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut0 = st;
+ uint64_t *h = scrut0.h;
+ uint64_t *x00 = h;
+ x00[0] = (uint64_t )0;
+ x00[1] = (uint64_t )0;
+ x00[2] = (uint64_t )0;
+ Hacl_Standalone_Poly1305_64_poly1305_blocks(st, input, len1);
+}
+
+Prims_nat AEAD_Poly1305_64_seval(void *b)
+{
+ printf("KreMLin abort at %s:%d\n%s\n", __FILE__, __LINE__, "noextract flag");
+ exit(255);
+}
+
+Prims_int AEAD_Poly1305_64_selem(void *s)
+{
+ printf("KreMLin abort at %s:%d\n%s\n", __FILE__, __LINE__, "noextract flag");
+ exit(255);
+}
+
+Hacl_Impl_Poly1305_64_State_poly1305_state
+AEAD_Poly1305_64_mk_state(uint64_t *r, uint64_t *acc)
+{
+ return Hacl_Impl_Poly1305_64_mk_state(r, acc);
+}
+
+uint32_t AEAD_Poly1305_64_mul_div_16(uint32_t len1)
+{
+ return (uint32_t )16 * (len1 >> (uint32_t )4);
+}
+
+void
+AEAD_Poly1305_64_pad_last(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *input,
+ uint32_t len1
+)
+{
+ uint8_t b[16];
+ if (len1 == (uint32_t )0)
+ {
+
+ }
+ else
+ {
+ memset(b, 0, (uint32_t )16 * sizeof b[0]);
+ memcpy(b, input, len1 * sizeof input[0]);
+ uint8_t *b0 = b;
+ Hacl_Impl_Poly1305_64_poly1305_update(st, b0);
+ }
+}
+
+void
+AEAD_Poly1305_64_poly1305_blocks_init(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *input,
+ uint32_t len1,
+ uint8_t *k1
+)
+{
+ uint32_t len_16 = len1 >> (uint32_t )4;
+ uint32_t rem_16 = len1 & (uint32_t )15;
+ uint8_t *kr = k1;
+ uint32_t len_ = (uint32_t )16 * (len1 >> (uint32_t )4);
+ uint8_t *part_input = input;
+ uint8_t *last_block = input + len_;
+ Hacl_Standalone_Poly1305_64_poly1305_partial(st, part_input, (uint64_t )len_16, kr);
+ AEAD_Poly1305_64_pad_last(st, last_block, rem_16);
+}
+
+void
+AEAD_Poly1305_64_poly1305_blocks_continue(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *input,
+ uint32_t len1
+)
+{
+ uint32_t len_16 = len1 >> (uint32_t )4;
+ uint32_t rem_16 = len1 & (uint32_t )15;
+ uint32_t len_ = (uint32_t )16 * (len1 >> (uint32_t )4);
+ uint8_t *part_input = input;
+ uint8_t *last_block = input + len_;
+ Hacl_Standalone_Poly1305_64_poly1305_blocks(st, part_input, (uint64_t )len_16);
+ AEAD_Poly1305_64_pad_last(st, last_block, rem_16);
+}
+
+void
+AEAD_Poly1305_64_poly1305_blocks_finish_(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *input
+)
+{
+ Hacl_Impl_Poly1305_64_poly1305_update(st, input);
+ uint8_t *x2 = input + (uint32_t )16;
+ if ((uint64_t )0 == (uint64_t )0)
+ {
+
+ }
+ else
+ Hacl_Impl_Poly1305_64_poly1305_process_last_block(st, x2, (uint64_t )0);
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *h = scrut.h;
+ uint64_t *acc = h;
+ Hacl_Impl_Poly1305_64_poly1305_last_pass(acc);
+}
+
+void
+AEAD_Poly1305_64_poly1305_blocks_finish(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *input,
+ uint8_t *mac,
+ uint8_t *key_s
+)
+{
+ Hacl_Impl_Poly1305_64_poly1305_update(st, input);
+ uint8_t *x2 = input + (uint32_t )16;
+ if ((uint64_t )0 == (uint64_t )0)
+ {
+
+ }
+ else
+ Hacl_Impl_Poly1305_64_poly1305_process_last_block(st, x2, (uint64_t )0);
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *h = scrut.h;
+ uint64_t *acc = h;
+ Hacl_Impl_Poly1305_64_poly1305_last_pass(acc);
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut0 = st;
+ uint64_t *h3 = scrut0.h;
+ uint64_t *acc0 = h3;
+ FStar_UInt128_t k_ = load128_le(key_s);
+ uint64_t h0 = acc0[0];
+ uint64_t h1 = acc0[1];
+ uint64_t h2 = acc0[2];
+ FStar_UInt128_t
+ acc_ =
+ FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128(h2
+ << (uint32_t )24
+ | h1 >> (uint32_t )20),
+ (uint32_t )64),
+ FStar_Int_Cast_Full_uint64_to_uint128(h1 << (uint32_t )44 | h0));
+ FStar_UInt128_t mac_ = FStar_UInt128_add_mod(acc_, k_);
+ store128_le(mac, mac_);
+}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/AEAD_Poly1305_64.h b/sw/airborne/modules/datalink/hacl-c/AEAD_Poly1305_64.h
new file mode 100644
index 0000000000..dde1dfe6f7
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/AEAD_Poly1305_64.h
@@ -0,0 +1,101 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __AEAD_Poly1305_64_H
+#define __AEAD_Poly1305_64_H
+
+
+
+#include "testlib.h"
+
+typedef uint64_t Hacl_Bignum_Constants_limb;
+
+typedef FStar_UInt128_t Hacl_Bignum_Constants_wide;
+
+typedef FStar_UInt128_t Hacl_Bignum_Wide_t;
+
+typedef uint64_t Hacl_Bignum_Limb_t;
+
+typedef void *Hacl_Impl_Poly1305_64_State_log_t;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_State_uint8_p;
+
+typedef uint64_t *Hacl_Impl_Poly1305_64_State_bigint;
+
+typedef void *Hacl_Impl_Poly1305_64_State_seqelem;
+
+typedef uint64_t *Hacl_Impl_Poly1305_64_State_elemB;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_State_wordB;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_State_wordB_16;
+
+typedef struct
+{
+ uint64_t *r;
+ uint64_t *h;
+}
+Hacl_Impl_Poly1305_64_State_poly1305_state;
+
+typedef void *Hacl_Impl_Poly1305_64_log_t;
+
+typedef uint64_t *Hacl_Impl_Poly1305_64_bigint;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_uint8_p;
+
+typedef uint64_t *Hacl_Impl_Poly1305_64_elemB;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_wordB;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_wordB_16;
+
+typedef uint8_t *AEAD_Poly1305_64_uint8_p;
+
+typedef uint8_t *AEAD_Poly1305_64_key;
+
+Prims_nat AEAD_Poly1305_64_seval(void *b);
+
+Prims_int AEAD_Poly1305_64_selem(void *s);
+
+typedef Hacl_Impl_Poly1305_64_State_poly1305_state AEAD_Poly1305_64_state;
+
+Hacl_Impl_Poly1305_64_State_poly1305_state
+AEAD_Poly1305_64_mk_state(uint64_t *r, uint64_t *acc);
+
+uint32_t AEAD_Poly1305_64_mul_div_16(uint32_t len1);
+
+void
+AEAD_Poly1305_64_pad_last(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *input,
+ uint32_t len1
+);
+
+void
+AEAD_Poly1305_64_poly1305_blocks_init(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *input,
+ uint32_t len1,
+ uint8_t *k1
+);
+
+void
+AEAD_Poly1305_64_poly1305_blocks_continue(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *input,
+ uint32_t len1
+);
+
+void
+AEAD_Poly1305_64_poly1305_blocks_finish_(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *input
+);
+
+void
+AEAD_Poly1305_64_poly1305_blocks_finish(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *input,
+ uint8_t *mac,
+ uint8_t *key_s
+);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/Chacha20.c b/sw/airborne/modules/datalink/hacl-c/Chacha20.c
new file mode 100644
index 0000000000..81bd60fc5c
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Chacha20.c
@@ -0,0 +1,246 @@
+#include "Chacha20.h"
+
+static void
+Hacl_Lib_LoadStore32_uint32s_from_le_bytes(uint32_t *output, uint8_t *input, uint32_t len)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint8_t *x0 = input + (uint32_t )4 * i;
+ uint32_t inputi = load32_le(x0);
+ output[i] = inputi;
+ }
+}
+
+static void
+Hacl_Lib_LoadStore32_uint32s_to_le_bytes(uint8_t *output, uint32_t *input, uint32_t len)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint32_t hd1 = input[i];
+ uint8_t *x0 = output + (uint32_t )4 * i;
+ store32_le(x0, hd1);
+ }
+}
+
+inline static void Hacl_Impl_Chacha20_setup(uint32_t *st, uint8_t *k, uint8_t *n1, uint32_t c)
+{
+ uint32_t *stcst = st;
+ uint32_t *stk = st + (uint32_t )4;
+ uint32_t *stc = st + (uint32_t )12;
+ uint32_t *stn = st + (uint32_t )13;
+ stcst[0] = (uint32_t )0x61707865;
+ stcst[1] = (uint32_t )0x3320646e;
+ stcst[2] = (uint32_t )0x79622d32;
+ stcst[3] = (uint32_t )0x6b206574;
+ Hacl_Lib_LoadStore32_uint32s_from_le_bytes(stk, k, (uint32_t )8);
+ stc[0] = c;
+ Hacl_Lib_LoadStore32_uint32s_from_le_bytes(stn, n1, (uint32_t )3);
+}
+
+inline static void
+Hacl_Impl_Chacha20_quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
+{
+ uint32_t sa = st[a];
+ uint32_t sb0 = st[b];
+ st[a] = sa + sb0;
+ uint32_t sd = st[d];
+ uint32_t sa10 = st[a];
+ uint32_t sda = sd ^ sa10;
+ st[d] = sda << (uint32_t )16 | sda >> (uint32_t )32 - (uint32_t )16;
+ uint32_t sa0 = st[c];
+ uint32_t sb1 = st[d];
+ st[c] = sa0 + sb1;
+ uint32_t sd0 = st[b];
+ uint32_t sa11 = st[c];
+ uint32_t sda0 = sd0 ^ sa11;
+ st[b] = sda0 << (uint32_t )12 | sda0 >> (uint32_t )32 - (uint32_t )12;
+ uint32_t sa2 = st[a];
+ uint32_t sb2 = st[b];
+ st[a] = sa2 + sb2;
+ uint32_t sd1 = st[d];
+ uint32_t sa12 = st[a];
+ uint32_t sda1 = sd1 ^ sa12;
+ st[d] = sda1 << (uint32_t )8 | sda1 >> (uint32_t )32 - (uint32_t )8;
+ uint32_t sa3 = st[c];
+ uint32_t sb = st[d];
+ st[c] = sa3 + sb;
+ uint32_t sd2 = st[b];
+ uint32_t sa1 = st[c];
+ uint32_t sda2 = sd2 ^ sa1;
+ st[b] = sda2 << (uint32_t )7 | sda2 >> (uint32_t )32 - (uint32_t )7;
+}
+
+inline static void Hacl_Impl_Chacha20_double_round(uint32_t *st)
+{
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t )0, (uint32_t )4, (uint32_t )8, (uint32_t )12);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t )1, (uint32_t )5, (uint32_t )9, (uint32_t )13);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t )2, (uint32_t )6, (uint32_t )10, (uint32_t )14);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t )3, (uint32_t )7, (uint32_t )11, (uint32_t )15);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t )0, (uint32_t )5, (uint32_t )10, (uint32_t )15);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t )1, (uint32_t )6, (uint32_t )11, (uint32_t )12);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t )2, (uint32_t )7, (uint32_t )8, (uint32_t )13);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t )3, (uint32_t )4, (uint32_t )9, (uint32_t )14);
+}
+
+inline static void Hacl_Impl_Chacha20_rounds(uint32_t *st)
+{
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )10; i = i + (uint32_t )1)
+ Hacl_Impl_Chacha20_double_round(st);
+}
+
+inline static void Hacl_Impl_Chacha20_sum_states(uint32_t *st, uint32_t *st_)
+{
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )16; i = i + (uint32_t )1)
+ {
+ uint32_t uu____871 = st[i];
+ uint32_t uu____874 = st_[i];
+ uint32_t uu____870 = uu____871 + uu____874;
+ st[i] = uu____870;
+ }
+}
+
+inline static void Hacl_Impl_Chacha20_copy_state(uint32_t *st, uint32_t *st_)
+{
+ memcpy(st, st_, (uint32_t )16 * sizeof st_[0]);
+}
+
+inline static void Hacl_Impl_Chacha20_chacha20_core(uint32_t *k, uint32_t *st, uint32_t ctr)
+{
+ st[12] = ctr;
+ Hacl_Impl_Chacha20_copy_state(k, st);
+ Hacl_Impl_Chacha20_rounds(k);
+ Hacl_Impl_Chacha20_sum_states(k, st);
+}
+
+inline static void
+Hacl_Impl_Chacha20_chacha20_block(uint8_t *stream_block, uint32_t *st, uint32_t ctr)
+{
+ uint32_t st_[16] = { 0 };
+ Hacl_Impl_Chacha20_chacha20_core(st_, st, ctr);
+ Hacl_Lib_LoadStore32_uint32s_to_le_bytes(stream_block, st_, (uint32_t )16);
+}
+
+inline static void Hacl_Impl_Chacha20_init(uint32_t *st, uint8_t *k, uint8_t *n1)
+{
+ Hacl_Impl_Chacha20_setup(st, k, n1, (uint32_t )0);
+}
+
+static void
+Hacl_Impl_Chacha20_update_last(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint32_t *st,
+ uint32_t ctr
+)
+{
+ uint8_t block[64] = { 0 };
+ Hacl_Impl_Chacha20_chacha20_block(block, st, ctr);
+ uint8_t *mask = block;
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint8_t uu____602 = plain[i];
+ uint8_t uu____605 = mask[i];
+ uint8_t uu____601 = uu____602 ^ uu____605;
+ output[i] = uu____601;
+ }
+}
+
+static void
+Hacl_Impl_Chacha20_update(uint8_t *output, uint8_t *plain, uint32_t *st, uint32_t ctr)
+{
+ uint32_t b[48] = { 0 };
+ uint32_t *k = b;
+ uint32_t *ib = b + (uint32_t )16;
+ uint32_t *ob = b + (uint32_t )32;
+ Hacl_Impl_Chacha20_chacha20_core(k, st, ctr);
+ Hacl_Lib_LoadStore32_uint32s_from_le_bytes(ib, plain, (uint32_t )16);
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )16; i = i + (uint32_t )1)
+ {
+ uint32_t uu____602 = ib[i];
+ uint32_t uu____605 = k[i];
+ uint32_t uu____601 = uu____602 ^ uu____605;
+ ob[i] = uu____601;
+ }
+ Hacl_Lib_LoadStore32_uint32s_to_le_bytes(output, ob, (uint32_t )16);
+}
+
+static void
+Hacl_Impl_Chacha20_chacha20_counter_mode_blocks(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint32_t *st,
+ uint32_t ctr
+)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint8_t *b = plain + (uint32_t )64 * i;
+ uint8_t *o = output + (uint32_t )64 * i;
+ Hacl_Impl_Chacha20_update(o, b, st, ctr + i);
+ }
+}
+
+static void
+Hacl_Impl_Chacha20_chacha20_counter_mode(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint32_t *st,
+ uint32_t ctr
+)
+{
+ uint32_t blocks_len = len >> (uint32_t )6;
+ uint32_t part_len = len & (uint32_t )0x3f;
+ uint8_t *output_ = output;
+ uint8_t *plain_ = plain;
+ uint8_t *output__ = output + (uint32_t )64 * blocks_len;
+ uint8_t *plain__ = plain + (uint32_t )64 * blocks_len;
+ Hacl_Impl_Chacha20_chacha20_counter_mode_blocks(output_, plain_, blocks_len, st, ctr);
+ if (part_len > (uint32_t )0)
+ Hacl_Impl_Chacha20_update_last(output__, plain__, part_len, st, ctr + blocks_len);
+}
+
+static void
+Hacl_Impl_Chacha20_chacha20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr
+)
+{
+ uint32_t buf[16] = { 0 };
+ uint32_t *st = buf;
+ Hacl_Impl_Chacha20_init(st, k, n1);
+ Hacl_Impl_Chacha20_chacha20_counter_mode(output, plain, len, st, ctr);
+}
+
+void *Chacha20_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *m)
+{
+ return (void *)(uint8_t )0;
+}
+
+void Chacha20_chacha20_key_block(uint8_t *block, uint8_t *k, uint8_t *n1, uint32_t ctr)
+{
+ uint32_t buf[16] = { 0 };
+ uint32_t *st = buf;
+ Hacl_Impl_Chacha20_init(st, k, n1);
+ Hacl_Impl_Chacha20_chacha20_block(block, st, ctr);
+}
+
+void
+Chacha20_chacha20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr
+)
+{
+ Hacl_Impl_Chacha20_chacha20(output, plain, len, k, n1, ctr);
+}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/Chacha20.h b/sw/airborne/modules/datalink/hacl-c/Chacha20.h
new file mode 100644
index 0000000000..bc83cc9dd8
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Chacha20.h
@@ -0,0 +1,54 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __Chacha20_H
+#define __Chacha20_H
+
+
+
+#include "testlib.h"
+
+typedef uint32_t Hacl_Impl_Xor_Lemmas_u32;
+
+typedef uint8_t Hacl_Impl_Xor_Lemmas_u8;
+
+typedef uint8_t *Hacl_Lib_LoadStore32_uint8_p;
+
+typedef uint32_t Hacl_Impl_Chacha20_u32;
+
+typedef uint32_t Hacl_Impl_Chacha20_h32;
+
+typedef uint8_t *Hacl_Impl_Chacha20_uint8_p;
+
+typedef uint32_t *Hacl_Impl_Chacha20_state;
+
+typedef uint32_t Hacl_Impl_Chacha20_idx;
+
+typedef struct
+{
+ void *k;
+ void *n;
+}
+Hacl_Impl_Chacha20_log_t_;
+
+typedef void *Hacl_Impl_Chacha20_log_t;
+
+typedef uint32_t Hacl_Lib_Create_h32;
+
+typedef uint8_t *Chacha20_uint8_p;
+
+typedef uint32_t Chacha20_uint32_t;
+
+void *Chacha20_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *m);
+
+void Chacha20_chacha20_key_block(uint8_t *block, uint8_t *k, uint8_t *n1, uint32_t ctr);
+
+void
+Chacha20_chacha20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr
+);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/Chacha20Poly1305.c b/sw/airborne/modules/datalink/hacl-c/Chacha20Poly1305.c
new file mode 100644
index 0000000000..6f9478b6bf
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Chacha20Poly1305.c
@@ -0,0 +1,109 @@
+#include "Chacha20Poly1305.h"
+
+Prims_int Chacha20Poly1305_noncelen;
+
+Prims_int Chacha20Poly1305_keylen;
+
+Prims_int Chacha20Poly1305_maclen;
+
+static void
+Chacha20Poly1305_aead_encrypt_poly(
+ uint8_t *c,
+ uint32_t mlen,
+ uint8_t *mac,
+ uint8_t *aad1,
+ uint32_t aadlen,
+ uint8_t *tmp
+)
+{
+ uint8_t *b = tmp;
+ uint8_t *lb = tmp + (uint32_t )64;
+ uint8_t *mk = b;
+ uint8_t *key_s = mk + (uint32_t )16;
+ uint64_t tmp1[6] = { 0 };
+ Hacl_Impl_Poly1305_64_State_poly1305_state
+ st = AEAD_Poly1305_64_mk_state(tmp1, tmp1 + (uint32_t )3);
+ (void )AEAD_Poly1305_64_poly1305_blocks_init(st, aad1, aadlen, mk);
+ (void )AEAD_Poly1305_64_poly1305_blocks_continue(st, c, mlen);
+ AEAD_Poly1305_64_poly1305_blocks_finish(st, lb, mac, key_s);
+}
+
+void Chacha20Poly1305_encode_length(uint8_t *lb, uint32_t aad_len, uint32_t mlen)
+{
+ store64_le(lb, (uint64_t )aad_len);
+ uint8_t *x0 = lb + (uint32_t )8;
+ store64_le(x0, (uint64_t )mlen);
+}
+
+uint32_t
+Chacha20Poly1305_aead_encrypt_(
+ uint8_t *c,
+ uint8_t *mac,
+ uint8_t *m,
+ uint32_t mlen,
+ uint8_t *aad1,
+ uint32_t aadlen,
+ uint8_t *k1,
+ uint8_t *n1
+)
+{
+ uint8_t tmp[80] = { 0 };
+ uint8_t *b = tmp;
+ uint8_t *lb = tmp + (uint32_t )64;
+ Chacha20Poly1305_encode_length(lb, aadlen, mlen);
+ Chacha20_chacha20(c, m, mlen, k1, n1, (uint32_t )1);
+ Chacha20_chacha20_key_block(b, k1, n1, (uint32_t )0);
+ Chacha20Poly1305_aead_encrypt_poly(c, mlen, mac, aad1, aadlen, tmp);
+ return (uint32_t )0;
+}
+
+uint32_t
+Chacha20Poly1305_aead_encrypt(
+ uint8_t *c,
+ uint8_t *mac,
+ uint8_t *m,
+ uint32_t mlen,
+ uint8_t *aad1,
+ uint32_t aadlen,
+ uint8_t *k1,
+ uint8_t *n1
+)
+{
+ uint32_t z = Chacha20Poly1305_aead_encrypt_(c, mac, m, mlen, aad1, aadlen, k1, n1);
+ return z;
+}
+
+uint32_t
+Chacha20Poly1305_aead_decrypt(
+ uint8_t *m,
+ uint8_t *c,
+ uint32_t mlen,
+ uint8_t *mac,
+ uint8_t *aad1,
+ uint32_t aadlen,
+ uint8_t *k1,
+ uint8_t *n1
+)
+{
+ uint8_t tmp[96] = { 0 };
+ uint8_t *b = tmp;
+ uint8_t *lb = tmp + (uint32_t )64;
+ Chacha20Poly1305_encode_length(lb, aadlen, mlen);
+ uint8_t *rmac = tmp + (uint32_t )80;
+ Chacha20_chacha20_key_block(b, k1, n1, (uint32_t )0);
+ uint8_t *mk = b;
+ (void )(mk + (uint32_t )16);
+ Chacha20Poly1305_aead_encrypt_poly(c, mlen, rmac, aad1, aadlen, tmp);
+ uint8_t result = Hacl_Policies_cmp_bytes(mac, rmac, (uint32_t )16);
+ uint8_t verify = result;
+ uint32_t res;
+ if (verify == (uint8_t )0)
+ {
+ Chacha20_chacha20(m, c, mlen, k1, n1, (uint32_t )1);
+ res = (uint32_t )0;
+ }
+ else
+ res = (uint32_t )1;
+ return res;
+}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/Chacha20Poly1305.h b/sw/airborne/modules/datalink/hacl-c/Chacha20Poly1305.h
new file mode 100644
index 0000000000..e7f377e48b
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Chacha20Poly1305.h
@@ -0,0 +1,58 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __Chacha20Poly1305_H
+#define __Chacha20Poly1305_H
+
+
+#include "Hacl_Policies.h"
+#include "Chacha20.h"
+#include "AEAD_Poly1305_64.h"
+
+extern Prims_int Chacha20Poly1305_noncelen;
+
+extern Prims_int Chacha20Poly1305_keylen;
+
+extern Prims_int Chacha20Poly1305_maclen;
+
+typedef Hacl_Impl_Poly1305_64_State_poly1305_state Chacha20Poly1305_state;
+
+typedef void *Chacha20Poly1305_log_t;
+
+void Chacha20Poly1305_encode_length(uint8_t *lb, uint32_t aad_len, uint32_t mlen);
+
+uint32_t
+Chacha20Poly1305_aead_encrypt_(
+ uint8_t *c,
+ uint8_t *mac,
+ uint8_t *m,
+ uint32_t mlen,
+ uint8_t *aad1,
+ uint32_t aadlen,
+ uint8_t *k1,
+ uint8_t *n1
+);
+
+uint32_t
+Chacha20Poly1305_aead_encrypt(
+ uint8_t *c,
+ uint8_t *mac,
+ uint8_t *m,
+ uint32_t mlen,
+ uint8_t *aad1,
+ uint32_t aadlen,
+ uint8_t *k1,
+ uint8_t *n1
+);
+
+uint32_t
+Chacha20Poly1305_aead_decrypt(
+ uint8_t *m,
+ uint8_t *c,
+ uint32_t mlen,
+ uint8_t *mac,
+ uint8_t *aad1,
+ uint32_t aadlen,
+ uint8_t *k1,
+ uint8_t *n1
+);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/Chacha20_Vec128.c b/sw/airborne/modules/datalink/hacl-c/Chacha20_Vec128.c
new file mode 100644
index 0000000000..e96a59b967
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Chacha20_Vec128.c
@@ -0,0 +1,371 @@
+#include "Chacha20_Vec128.h"
+
+/* Advance the ChaCha state one block: add one_le to row 3, the row that
+ holds the 32-bit block counter lane (see state_setup). */
+inline static void Hacl_Impl_Chacha20_Vec128_State_state_incr(vec *k)
+{
+ vec k3 = k[3];
+ k[3] = vec_add(k3, one_le);
+}
+
+/* Serialize the four 128-bit state rows little-endian into the 64-byte
+ keystream block stream_block. */
+inline static void
+Hacl_Impl_Chacha20_Vec128_State_state_to_key_block(uint8_t *stream_block, vec *k)
+{
+ vec k0 = k[0];
+ vec k1 = k[1];
+ vec k2 = k[2];
+ vec k3 = k[3];
+ uint8_t *a = stream_block;
+ uint8_t *b = stream_block + (uint32_t )16;
+ uint8_t *c = stream_block + (uint32_t )32;
+ uint8_t *d = stream_block + (uint32_t )48;
+ vec_store_le(a, k0);
+ vec_store_le(b, k1);
+ vec_store_le(c, k2);
+ vec_store_le(d, k3);
+}
+
+/* Initialize the ChaCha20 state: row 0 = the "expand 32-byte k" sigma
+ constants, rows 1-2 = the 256-bit key k, row 3 = (counter c, 96-bit
+ nonce n1 as three LE words). */
+inline static void
+Hacl_Impl_Chacha20_Vec128_State_state_setup(vec *st, uint8_t *k, uint8_t *n1, uint32_t c)
+{
+ st[0] =
+ vec_load_32x4((uint32_t )0x61707865,
+ (uint32_t )0x3320646e,
+ (uint32_t )0x79622d32,
+ (uint32_t )0x6b206574);
+ vec k0 = vec_load128_le(k);
+ vec k1 = vec_load128_le(k + (uint32_t )16);
+ st[1] = k0;
+ st[2] = k1;
+ uint32_t n0 = load32_le(n1);
+ uint8_t *x00 = n1 + (uint32_t )4;
+ uint32_t n10 = load32_le(x00);
+ uint8_t *x0 = n1 + (uint32_t )8;
+ uint32_t n2 = load32_le(x0);
+ vec v1 = vec_load_32x4(c, n0, n10, n2);
+ st[3] = v1;
+}
+
+/* Half of a quarter-round, applied to whole rows (4 lanes at once):
+ st[a] += st[b]; st[d] = (st[d] ^ st[a]) <<< s. */
+inline static void
+Hacl_Impl_Chacha20_Vec128_line(vec *st, uint32_t a, uint32_t b, uint32_t d, uint32_t s)
+{
+ vec sa = st[a];
+ vec sb = st[b];
+ vec sd = st[d];
+ vec sa1 = vec_add(sa, sb);
+ vec sd1 = vec_rotate_left(vec_xor(sd, sa1), s);
+ st[a] = sa1;
+ st[d] = sd1;
+}
+
+/* One ChaCha round over all four columns in parallel, using the standard
+ rotation constants 16, 12, 8, 7. */
+inline static void Hacl_Impl_Chacha20_Vec128_round(vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_line(st, (uint32_t )0, (uint32_t )1, (uint32_t )3, (uint32_t )16);
+ Hacl_Impl_Chacha20_Vec128_line(st, (uint32_t )2, (uint32_t )3, (uint32_t )1, (uint32_t )12);
+ Hacl_Impl_Chacha20_Vec128_line(st, (uint32_t )0, (uint32_t )1, (uint32_t )3, (uint32_t )8);
+ Hacl_Impl_Chacha20_Vec128_line(st, (uint32_t )2, (uint32_t )3, (uint32_t )1, (uint32_t )7);
+}
+
+/* ChaCha double round: column round, diagonalize by rotating rows 1-3,
+ diagonal round, then rotate the rows back. */
+inline static void Hacl_Impl_Chacha20_Vec128_double_round(vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_round(st);
+ vec r1 = st[1];
+ vec r20 = st[2];
+ vec r30 = st[3];
+ st[1] = vec_shuffle_right(r1, (uint32_t )1);
+ st[2] = vec_shuffle_right(r20, (uint32_t )2);
+ st[3] = vec_shuffle_right(r30, (uint32_t )3);
+ Hacl_Impl_Chacha20_Vec128_round(st);
+ vec r10 = st[1];
+ vec r2 = st[2];
+ vec r3 = st[3];
+ st[1] = vec_shuffle_right(r10, (uint32_t )3);
+ st[2] = vec_shuffle_right(r2, (uint32_t )2);
+ st[3] = vec_shuffle_right(r3, (uint32_t )1);
+}
+
+/* Apply a double round to three independent states (3-block batch). */
+inline static void Hacl_Impl_Chacha20_Vec128_double_round3(vec *st, vec *st_, vec *st__)
+{
+ Hacl_Impl_Chacha20_Vec128_double_round(st);
+ Hacl_Impl_Chacha20_Vec128_double_round(st_);
+ Hacl_Impl_Chacha20_Vec128_double_round(st__);
+}
+
+/* Row-wise vector addition st_ += st (final "add initial state" step). */
+inline static void Hacl_Impl_Chacha20_Vec128_sum_states(vec *st_, vec *st)
+{
+ vec s0 = st[0];
+ vec s1 = st[1];
+ vec s2 = st[2];
+ vec s3 = st[3];
+ vec s0_ = st_[0];
+ vec s1_ = st_[1];
+ vec s2_ = st_[2];
+ vec s3_ = st_[3];
+ st_[0] = vec_add(s0_, s0);
+ st_[1] = vec_add(s1_, s1);
+ st_[2] = vec_add(s2_, s2);
+ st_[3] = vec_add(s3_, s3);
+}
+
+/* Copy the four state rows from st into st_. */
+inline static void Hacl_Impl_Chacha20_Vec128_copy_state(vec *st_, vec *st)
+{
+ vec uu____3478 = st[0];
+ st_[0] = uu____3478;
+ vec uu____3520 = st[1];
+ st_[1] = uu____3520;
+ vec uu____3562 = st[2];
+ st_[2] = uu____3562;
+ vec uu____3604 = st[3];
+ st_[3] = uu____3604;
+}
+
+/* ChaCha20 core: k = st, 10 double rounds (20 rounds), then k += st. */
+inline static void Hacl_Impl_Chacha20_Vec128_chacha20_core(vec *k, vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_copy_state(k, st);
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )10; i = i + (uint32_t )1)
+ Hacl_Impl_Chacha20_Vec128_double_round(k);
+ Hacl_Impl_Chacha20_Vec128_sum_states(k, st);
+}
+
+/* Thin wrapper: advance the block counter of st by one. */
+static void Hacl_Impl_Chacha20_Vec128_state_incr(vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_State_state_incr(st);
+}
+
+/* Prepare three working states: k0 = st, k1 = st with counter+1,
+ k2 = st with counter+2. st itself is left unchanged. */
+inline static void Hacl_Impl_Chacha20_Vec128_chacha20_incr3(vec *k0, vec *k1, vec *k2, vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_copy_state(k0, st);
+ Hacl_Impl_Chacha20_Vec128_copy_state(k1, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(k1);
+ Hacl_Impl_Chacha20_Vec128_copy_state(k2, k1);
+ Hacl_Impl_Chacha20_Vec128_state_incr(k2);
+}
+
+/* Add the matching counter-incremented copies of st into k0, k1, k2.
+ Side effect: st's counter ends up advanced by 2 (the caller in
+ update3_ advances it once more to reach the next triple). */
+inline static void Hacl_Impl_Chacha20_Vec128_chacha20_sum3(vec *k0, vec *k1, vec *k2, vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_sum_states(k0, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(st);
+ Hacl_Impl_Chacha20_Vec128_sum_states(k1, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(st);
+ Hacl_Impl_Chacha20_Vec128_sum_states(k2, st);
+}
+
+/* Three-block core: derive three counter states, run 10 double rounds on
+ each, then add back the initial states. */
+inline static void Hacl_Impl_Chacha20_Vec128_chacha20_core3(vec *k0, vec *k1, vec *k2, vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_chacha20_incr3(k0, k1, k2, st);
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )10; i = i + (uint32_t )1)
+ Hacl_Impl_Chacha20_Vec128_double_round3(k0, k1, k2);
+ Hacl_Impl_Chacha20_Vec128_chacha20_sum3(k0, k1, k2, st);
+}
+
+/* Produce one 64-byte keystream block from state st into stream_block. */
+inline static void Hacl_Impl_Chacha20_Vec128_chacha20_block(uint8_t *stream_block, vec *st)
+{
+ KRML_CHECK_SIZE(zero, (uint32_t )4);
+ vec k[4];
+ for (uintmax_t _i = 0; _i < (uint32_t )4; ++_i)
+ k[_i] = zero;
+ Hacl_Impl_Chacha20_Vec128_chacha20_core(k, st);
+ Hacl_Impl_Chacha20_Vec128_State_state_to_key_block(stream_block, k);
+}
+
+/* Thin wrapper: initialize st from key k, nonce n1 and counter ctr. */
+inline static void
+Hacl_Impl_Chacha20_Vec128_init(vec *st, uint8_t *k, uint8_t *n1, uint32_t ctr)
+{
+ Hacl_Impl_Chacha20_Vec128_State_state_setup(st, k, n1, ctr);
+}
+
+/* Encrypt the final partial block: generate one full keystream block and
+ XOR only the first len (< 64) bytes of plain into output. */
+static void
+Hacl_Impl_Chacha20_Vec128_update_last(uint8_t *output, uint8_t *plain, uint32_t len, vec *st)
+{
+ uint8_t block[64] = { 0 };
+ Hacl_Impl_Chacha20_Vec128_chacha20_block(block, st);
+ uint8_t *mask = block;
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint8_t uu____602 = plain[i];
+ uint8_t uu____605 = mask[i];
+ uint8_t uu____601 = uu____602 ^ uu____605;
+ output[i] = uu____601;
+ }
+}
+
+/* Store four 128-bit vectors little-endian at 16-byte offsets of output. */
+static void
+Hacl_Impl_Chacha20_Vec128_store_4_vec(uint8_t *output, vec v0, vec v1, vec v2, vec v3)
+{
+ uint8_t *o0 = output;
+ uint8_t *o1 = output + (uint32_t )16;
+ uint8_t *o2 = output + (uint32_t )32;
+ uint8_t *o3 = output + (uint32_t )48;
+ vec_store_le(o0, v0);
+ vec_store_le(o1, v1);
+ vec_store_le(o2, v2);
+ vec_store_le(o3, v3);
+}
+
+/* XOR one 64-byte plaintext block with the keystream held in st's rows. */
+static void Hacl_Impl_Chacha20_Vec128_xor_block(uint8_t *output, uint8_t *plain, vec *st)
+{
+ vec p0 = vec_load_le(plain);
+ vec p1 = vec_load_le(plain + (uint32_t )16);
+ vec p2 = vec_load_le(plain + (uint32_t )32);
+ vec p3 = vec_load_le(plain + (uint32_t )48);
+ vec k0 = st[0];
+ vec k1 = st[1];
+ vec k2 = st[2];
+ vec k3 = st[3];
+ vec o0 = vec_xor(p0, k0);
+ vec o1 = vec_xor(p1, k1);
+ vec o2 = vec_xor(p2, k2);
+ vec o3 = vec_xor(p3, k3);
+ Hacl_Impl_Chacha20_Vec128_store_4_vec(output, o0, o1, o2, o3);
+}
+
+/* Encrypt one full 64-byte block: run the core, then XOR into output. */
+static void Hacl_Impl_Chacha20_Vec128_update(uint8_t *output, uint8_t *plain, vec *st)
+{
+ KRML_CHECK_SIZE(zero, (uint32_t )4);
+ vec k[4];
+ for (uintmax_t _i = 0; _i < (uint32_t )4; ++_i)
+ k[_i] = zero;
+ Hacl_Impl_Chacha20_Vec128_chacha20_core(k, st);
+ Hacl_Impl_Chacha20_Vec128_xor_block(output, plain, k);
+}
+
+/* Encrypt three consecutive 64-byte blocks (192 bytes) with the 3-way core. */
+static void Hacl_Impl_Chacha20_Vec128_update3(uint8_t *output, uint8_t *plain, vec *st)
+{
+ KRML_CHECK_SIZE(zero, (uint32_t )4);
+ vec k0[4];
+ for (uintmax_t _i = 0; _i < (uint32_t )4; ++_i)
+ k0[_i] = zero;
+ KRML_CHECK_SIZE(zero, (uint32_t )4);
+ vec k1[4];
+ for (uintmax_t _i = 0; _i < (uint32_t )4; ++_i)
+ k1[_i] = zero;
+ KRML_CHECK_SIZE(zero, (uint32_t )4);
+ vec k2[4];
+ for (uintmax_t _i = 0; _i < (uint32_t )4; ++_i)
+ k2[_i] = zero;
+ Hacl_Impl_Chacha20_Vec128_chacha20_core3(k0, k1, k2, st);
+ uint8_t *p0 = plain;
+ uint8_t *p1 = plain + (uint32_t )64;
+ uint8_t *p2 = plain + (uint32_t )128;
+ uint8_t *o0 = output;
+ uint8_t *o1 = output + (uint32_t )64;
+ uint8_t *o2 = output + (uint32_t )128;
+ Hacl_Impl_Chacha20_Vec128_xor_block(o0, p0, k0);
+ Hacl_Impl_Chacha20_Vec128_xor_block(o1, p1, k1);
+ Hacl_Impl_Chacha20_Vec128_xor_block(o2, p2, k2);
+}
+
+/* Encrypt the i-th 192-byte triple. The core3/sum3 path has already
+ advanced st's counter by 2; the final state_incr here brings it to +3,
+ the start of the next triple. (len is part of the extracted F* spec
+ and unused at runtime.) */
+static void
+Hacl_Impl_Chacha20_Vec128_update3_(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ vec *st,
+ uint32_t i
+)
+{
+ uint8_t *out_block = output + (uint32_t )192 * i;
+ uint8_t *plain_block = plain + (uint32_t )192 * i;
+ Hacl_Impl_Chacha20_Vec128_update3(out_block, plain_block, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(st);
+}
+
+/* Encrypt len triples of 64-byte blocks (len counts 192-byte groups). */
+static void
+Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode_blocks3(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ vec *st
+)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ Hacl_Impl_Chacha20_Vec128_update3_(output, plain, len, st, i);
+}
+
+/* Encrypt len full 64-byte blocks: process them three at a time, then
+ handle the 0, 1 or 2 remaining single blocks. */
+static void
+Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode_blocks(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ vec *st
+)
+{
+ uint32_t len3 = len / (uint32_t )3;
+ uint32_t rest3 = len % (uint32_t )3;
+ uint8_t *plain_ = plain;
+ uint8_t *blocks1 = plain + (uint32_t )192 * len3;
+ uint8_t *output_ = output;
+ uint8_t *outs = output + (uint32_t )192 * len3;
+ Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode_blocks3(output_, plain_, len3, st);
+ if (rest3 == (uint32_t )2)
+ {
+ uint8_t *block0 = blocks1;
+ uint8_t *block1 = blocks1 + (uint32_t )64;
+ uint8_t *out0 = outs;
+ uint8_t *out1 = outs + (uint32_t )64;
+ Hacl_Impl_Chacha20_Vec128_update(out0, block0, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(st);
+ Hacl_Impl_Chacha20_Vec128_update(out1, block1, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(st);
+ }
+ else if (rest3 == (uint32_t )1)
+ {
+ Hacl_Impl_Chacha20_Vec128_update(outs, blocks1, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(st);
+ }
+}
+
+/* Full CTR mode: encrypt len >> 6 whole 64-byte blocks, then the
+ remaining len & 0x3f bytes as a partial block. */
+static void
+Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ vec *st
+)
+{
+ uint32_t blocks_len = len >> (uint32_t )6;
+ uint32_t part_len = len & (uint32_t )0x3f;
+ uint8_t *output_ = output;
+ uint8_t *plain_ = plain;
+ uint8_t *output__ = output + (uint32_t )64 * blocks_len;
+ uint8_t *plain__ = plain + (uint32_t )64 * blocks_len;
+ Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode_blocks(output_, plain_, blocks_len, st);
+ if (part_len > (uint32_t )0)
+ Hacl_Impl_Chacha20_Vec128_update_last(output__, plain__, part_len, st);
+}
+
+/* Top-level ChaCha20: build the state from key k, nonce n1 and initial
+ counter ctr on the stack, then run counter mode over len bytes. */
+static void
+Hacl_Impl_Chacha20_Vec128_chacha20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr
+)
+{
+ KRML_CHECK_SIZE(zero, (uint32_t )4);
+ vec buf[4];
+ for (uintmax_t _i = 0; _i < (uint32_t )4; ++_i)
+ buf[_i] = zero;
+ vec *st = buf;
+ Hacl_Impl_Chacha20_Vec128_init(st, k, n1, ctr);
+ Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode(output, plain, len, st);
+}
+
+/* Ghost artifact of the F* extraction (heap-access operator in the spec);
+ has no runtime meaning and always returns a zero "pointer". */
+void *Chacha20_Vec128_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *b)
+{
+ return (void *)(uint8_t )0;
+}
+
+/* Public entry point: ChaCha20 encryption/decryption of len bytes of plain
+ into output with 256-bit key k, 96-bit nonce n1 and initial counter ctr
+ (XOR keystream — the same call decrypts). */
+void
+Chacha20_Vec128_chacha20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr
+)
+{
+ Hacl_Impl_Chacha20_Vec128_chacha20(output, plain, len, k, n1, ctr);
+}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/Chacha20_Vec128.h b/sw/airborne/modules/datalink/hacl-c/Chacha20_Vec128.h
new file mode 100644
index 0000000000..e86b355afc
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Chacha20_Vec128.h
@@ -0,0 +1,54 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __Chacha20_Vec128_H
+#define __Chacha20_Vec128_H
+
+
+
+#include "testlib.h"
+#include "vec128.h"
+
+/* Type aliases below mirror the F* module structure; all are plain
+ machine-integer or pointer synonyms at the C level. */
+typedef uint32_t Hacl_Impl_Xor_Lemmas_u32;
+
+typedef uint8_t Hacl_Impl_Xor_Lemmas_u8;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_State_u32;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_State_h32;
+
+typedef uint8_t *Hacl_Impl_Chacha20_Vec128_State_uint8_p;
+
+/* The ChaCha state is four 128-bit vectors (one 4x32 row each). */
+typedef vec *Hacl_Impl_Chacha20_Vec128_State_state;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_u32;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_h32;
+
+typedef uint8_t *Hacl_Impl_Chacha20_Vec128_uint8_p;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_idx;
+
+/* Ghost log record from the specification (key, nonce, counter). */
+typedef struct
+{
+ void *k;
+ void *n;
+ uint32_t ctr;
+}
+Hacl_Impl_Chacha20_Vec128_log_t_;
+
+typedef void *Hacl_Impl_Chacha20_Vec128_log_t;
+
+typedef uint8_t *Chacha20_Vec128_uint8_p;
+
+/* Ghost spec operator; no runtime meaning (see implementation). */
+void *Chacha20_Vec128_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *b);
+
+/* ChaCha20 over len bytes with 256-bit key k, 96-bit nonce n1, counter ctr. */
+void
+Chacha20_Vec128_chacha20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr
+);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/Curve25519.c b/sw/airborne/modules/datalink/hacl-c/Curve25519.c
new file mode 100644
index 0000000000..741b635496
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Curve25519.c
@@ -0,0 +1,1000 @@
+#include "Curve25519.h"
+
+/* Field elements are 5 limbs in radix 2^51 (mod p = 2^255 - 19).
+ Fold the overflow of limb 4 back into limb 0: since 2^255 = 19 mod p,
+ the carried bits are multiplied by 19. */
+static void Hacl_Bignum_Modulo_carry_top(uint64_t *b)
+{
+ uint64_t b4 = b[4];
+ uint64_t b0 = b[0];
+ uint64_t mask = ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ uint64_t b4_ = b4 & mask;
+ uint64_t b0_ = b0 + (uint64_t )19 * (b4 >> (uint32_t )51);
+ b[4] = b4_;
+ b[0] = b0_;
+}
+
+/* Truncate five 128-bit wide limbs to their low 64 bits into output
+ (fully unrolled; valid after carrying has bounded each limb). */
+inline static void
+Hacl_Bignum_Fproduct_copy_from_wide_(uint64_t *output, FStar_UInt128_t *input)
+{
+ {
+ FStar_UInt128_t uu____429 = input[0];
+ uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
+ output[0] = uu____428;
+ }
+ {
+ FStar_UInt128_t uu____429 = input[1];
+ uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
+ output[1] = uu____428;
+ }
+ {
+ FStar_UInt128_t uu____429 = input[2];
+ uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
+ output[2] = uu____428;
+ }
+ {
+ FStar_UInt128_t uu____429 = input[3];
+ uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
+ output[3] = uu____428;
+ }
+ {
+ FStar_UInt128_t uu____429 = input[4];
+ uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
+ output[4] = uu____428;
+ }
+}
+
+/* Rotate the 5 limbs up by one position: output[i] <- output[i-1],
+ output[0] <- old output[4] (unrolled; used by shift_reduce). */
+inline static void Hacl_Bignum_Fproduct_shift(uint64_t *output)
+{
+ uint64_t tmp = output[4];
+ {
+ uint32_t ctr = (uint32_t )5 - (uint32_t )0 - (uint32_t )1;
+ uint64_t z = output[ctr - (uint32_t )1];
+ output[ctr] = z;
+ }
+ {
+ uint32_t ctr = (uint32_t )5 - (uint32_t )1 - (uint32_t )1;
+ uint64_t z = output[ctr - (uint32_t )1];
+ output[ctr] = z;
+ }
+ {
+ uint32_t ctr = (uint32_t )5 - (uint32_t )2 - (uint32_t )1;
+ uint64_t z = output[ctr - (uint32_t )1];
+ output[ctr] = z;
+ }
+ {
+ uint32_t ctr = (uint32_t )5 - (uint32_t )3 - (uint32_t )1;
+ uint64_t z = output[ctr - (uint32_t )1];
+ output[ctr] = z;
+ }
+ output[0] = tmp;
+}
+
+/* Multiply-accumulate into the wide accumulator:
+ output[i] += (uint128)input[i] * s, unrolled over the 5 limbs. */
+inline static void
+Hacl_Bignum_Fproduct_sum_scalar_multiplication_(
+ FStar_UInt128_t *output,
+ uint64_t *input,
+ uint64_t s
+)
+{
+ {
+ FStar_UInt128_t uu____871 = output[0];
+ uint64_t uu____874 = input[0];
+ FStar_UInt128_t
+ uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
+ output[0] = uu____870;
+ }
+ {
+ FStar_UInt128_t uu____871 = output[1];
+ uint64_t uu____874 = input[1];
+ FStar_UInt128_t
+ uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
+ output[1] = uu____870;
+ }
+ {
+ FStar_UInt128_t uu____871 = output[2];
+ uint64_t uu____874 = input[2];
+ FStar_UInt128_t
+ uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
+ output[2] = uu____870;
+ }
+ {
+ FStar_UInt128_t uu____871 = output[3];
+ uint64_t uu____874 = input[3];
+ FStar_UInt128_t
+ uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
+ output[3] = uu____870;
+ }
+ {
+ FStar_UInt128_t uu____871 = output[4];
+ uint64_t uu____874 = input[4];
+ FStar_UInt128_t
+ uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
+ output[4] = uu____870;
+ }
+}
+
+/* Carry chain over wide limbs 0..3: keep the low 51 bits of each limb and
+ add the carry into the next one (unrolled; limb 4's overflow is folded
+ separately by the callers via the *19 reduction).
+ Note on precedence: `x & ((uint64_t)1 << 51) - (uint64_t)1` parses as
+ `x & ((1 << 51) - 1)`, i.e. the 51-bit mask — intentional. */
+inline static void Hacl_Bignum_Fproduct_carry_wide_(FStar_UInt128_t *tmp)
+{
+ {
+ uint32_t ctr = (uint32_t )0;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t )1];
+ uint64_t
+ r0 =
+ FStar_Int_Cast_Full_uint128_to_uint64(tctr)
+ & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t )51);
+ tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t )1] = FStar_UInt128_add(tctrp1, c);
+ }
+ {
+ uint32_t ctr = (uint32_t )1;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t )1];
+ uint64_t
+ r0 =
+ FStar_Int_Cast_Full_uint128_to_uint64(tctr)
+ & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t )51);
+ tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t )1] = FStar_UInt128_add(tctrp1, c);
+ }
+ {
+ uint32_t ctr = (uint32_t )2;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t )1];
+ uint64_t
+ r0 =
+ FStar_Int_Cast_Full_uint128_to_uint64(tctr)
+ & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t )51);
+ tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t )1] = FStar_UInt128_add(tctrp1, c);
+ }
+ {
+ uint32_t ctr = (uint32_t )3;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t )1];
+ uint64_t
+ r0 =
+ FStar_Int_Cast_Full_uint128_to_uint64(tctr)
+ & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t )51);
+ tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t )1] = FStar_UInt128_add(tctrp1, c);
+ }
+}
+
+/* Multiply the field element by 2^51 in-place: rotate limbs up one slot
+ and multiply the wrapped-around top limb by 19 (2^255 = 19 mod p). */
+inline static void Hacl_Bignum_Fmul_shift_reduce(uint64_t *output)
+{
+ Hacl_Bignum_Fproduct_shift(output);
+ uint64_t b0 = output[0];
+ output[0] = (uint64_t )19 * b0;
+}
+
+/* Schoolbook 5x5 limb multiplication with interleaved shift-reduce:
+ for each limb of input21, accumulate input * limb into the wide
+ accumulator, then shift-reduce input for the next digit.
+ NOTE: destroys input — callers must pass a scratch copy (see fmul). */
+static void
+Hacl_Bignum_Fmul_mul_shift_reduce_(FStar_UInt128_t *output, uint64_t *input, uint64_t *input21)
+{
+ {
+ uint64_t input2i = input21[0];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ {
+ uint64_t input2i = input21[1];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ {
+ uint64_t input2i = input21[2];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ {
+ uint64_t input2i = input21[3];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ uint32_t i = (uint32_t )4;
+ uint64_t input2i = input21[i];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+}
+
+/* Core field multiplication: multiply into a wide accumulator, run the
+ carry chain, fold the top-limb overflow (*19) into limb 0, narrow to
+ 64-bit limbs, then one final carry from limb 0 into limb 1.
+ Mutates input (via mul_shift_reduce_); callers copy first. */
+inline static void Hacl_Bignum_Fmul_fmul_(uint64_t *output, uint64_t *input, uint64_t *input21)
+{
+ KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0), (uint32_t )5);
+ FStar_UInt128_t t[5];
+ for (uintmax_t _i = 0; _i < (uint32_t )5; ++_i)
+ t[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
+ Hacl_Bignum_Fmul_mul_shift_reduce_(t, input, input21);
+ Hacl_Bignum_Fproduct_carry_wide_(t);
+ FStar_UInt128_t b4 = t[4];
+ FStar_UInt128_t b0 = t[0];
+ FStar_UInt128_t
+ mask =
+ FStar_UInt128_sub(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )1),
+ (uint32_t )51),
+ FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )1));
+ FStar_UInt128_t b4_ = FStar_UInt128_logand(b4, mask);
+ FStar_UInt128_t
+ b0_ =
+ FStar_UInt128_add(b0,
+ FStar_UInt128_mul_wide((uint64_t )19,
+ FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t )51))));
+ t[4] = b4_;
+ t[0] = b0_;
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, t);
+ uint64_t i0 = output[0];
+ uint64_t i1 = output[1];
+ uint64_t i0_ = i0 & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t )51);
+ output[0] = i0_;
+ output[1] = i1_;
+}
+
+/* Field multiplication output = input * input21 mod p. Copies input to a
+ temporary first because fmul_ destroys its second argument. */
+inline static void Hacl_Bignum_Fmul_fmul(uint64_t *output, uint64_t *input, uint64_t *input21)
+{
+ uint64_t tmp[5] = { 0 };
+ memcpy(tmp, input, (uint32_t )5 * sizeof input[0]);
+ Hacl_Bignum_Fmul_fmul_(output, tmp, input21);
+}
+
+/* Store five wide limbs s0..s4 into tmp[0..4]. */
+inline static void
+Hacl_Bignum_Fsquare_upd_5(
+ FStar_UInt128_t *tmp,
+ FStar_UInt128_t s0,
+ FStar_UInt128_t s1,
+ FStar_UInt128_t s2,
+ FStar_UInt128_t s3,
+ FStar_UInt128_t s4
+)
+{
+ tmp[0] = s0;
+ tmp[1] = s1;
+ tmp[2] = s2;
+ tmp[3] = s3;
+ tmp[4] = s4;
+}
+
+/* Field squaring into the wide accumulator tmp, exploiting symmetry:
+ doubled cross terms (d0, d1, d2) and 19-scaled wrapped terms (d419, d4)
+ are precomputed so each result limb is three wide multiply-adds. */
+inline static void Hacl_Bignum_Fsquare_fsquare__(FStar_UInt128_t *tmp, uint64_t *output)
+{
+ uint64_t r0 = output[0];
+ uint64_t r1 = output[1];
+ uint64_t r2 = output[2];
+ uint64_t r3 = output[3];
+ uint64_t r4 = output[4];
+ uint64_t d0 = r0 * (uint64_t )2;
+ uint64_t d1 = r1 * (uint64_t )2;
+ uint64_t d2 = r2 * (uint64_t )2 * (uint64_t )19;
+ uint64_t d419 = r4 * (uint64_t )19;
+ uint64_t d4 = d419 * (uint64_t )2;
+ FStar_UInt128_t
+ s0 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(r0, r0),
+ FStar_UInt128_mul_wide(d4, r1)),
+ FStar_UInt128_mul_wide(d2, r3));
+ FStar_UInt128_t
+ s1 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r1),
+ FStar_UInt128_mul_wide(d4, r2)),
+ FStar_UInt128_mul_wide(r3 * (uint64_t )19, r3));
+ FStar_UInt128_t
+ s2 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r2),
+ FStar_UInt128_mul_wide(r1, r1)),
+ FStar_UInt128_mul_wide(d4, r3));
+ FStar_UInt128_t
+ s3 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r3),
+ FStar_UInt128_mul_wide(d1, r2)),
+ FStar_UInt128_mul_wide(r4, d419));
+ FStar_UInt128_t
+ s4 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r4),
+ FStar_UInt128_mul_wide(d1, r3)),
+ FStar_UInt128_mul_wide(r2, r2));
+ Hacl_Bignum_Fsquare_upd_5(tmp, s0, s1, s2, s3, s4);
+}
+
+/* One in-place field squaring of output: square into the wide
+ accumulator, then the same carry / top-limb-fold / narrow / final-carry
+ pipeline as Hacl_Bignum_Fmul_fmul_. */
+inline static void Hacl_Bignum_Fsquare_fsquare_(FStar_UInt128_t *tmp, uint64_t *output)
+{
+ Hacl_Bignum_Fsquare_fsquare__(tmp, output);
+ Hacl_Bignum_Fproduct_carry_wide_(tmp);
+ FStar_UInt128_t b4 = tmp[4];
+ FStar_UInt128_t b0 = tmp[0];
+ FStar_UInt128_t
+ mask =
+ FStar_UInt128_sub(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )1),
+ (uint32_t )51),
+ FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )1));
+ FStar_UInt128_t b4_ = FStar_UInt128_logand(b4, mask);
+ FStar_UInt128_t
+ b0_ =
+ FStar_UInt128_add(b0,
+ FStar_UInt128_mul_wide((uint64_t )19,
+ FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t )51))));
+ tmp[4] = b4_;
+ tmp[0] = b0_;
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
+ uint64_t i0 = output[0];
+ uint64_t i1 = output[1];
+ uint64_t i0_ = i0 & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t )51);
+ output[0] = i0_;
+ output[1] = i1_;
+}
+
+/* Square input in place count1 times. Always squares at least once, so
+ callers pass count1 >= 1 (all call sites in this file do). */
+static void
+Hacl_Bignum_Fsquare_fsquare_times_(uint64_t *input, FStar_UInt128_t *tmp, uint32_t count1)
+{
+ Hacl_Bignum_Fsquare_fsquare_(tmp, input);
+ for (uint32_t i = (uint32_t )1; i < count1; i = i + (uint32_t )1)
+ Hacl_Bignum_Fsquare_fsquare_(tmp, input);
+}
+
+/* output = input^(2^count1): copy input to output, then repeated
+ in-place squaring with a stack wide-limb scratch buffer. */
+inline static void
+Hacl_Bignum_Fsquare_fsquare_times(uint64_t *output, uint64_t *input, uint32_t count1)
+{
+ KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0), (uint32_t )5);
+ FStar_UInt128_t t[5];
+ for (uintmax_t _i = 0; _i < (uint32_t )5; ++_i)
+ t[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
+ memcpy(output, input, (uint32_t )5 * sizeof input[0]);
+ Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
+}
+
+/* In-place variant: output = output^(2^count1). */
+inline static void Hacl_Bignum_Fsquare_fsquare_times_inplace(uint64_t *output, uint32_t count1)
+{
+ KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0), (uint32_t )5);
+ FStar_UInt128_t t[5];
+ for (uintmax_t _i = 0; _i < (uint32_t )5; ++_i)
+ t[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
+ Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
+}
+
+/* Field inversion out = z^(p-2) mod p (Fermat), using the standard
+ Curve25519 addition chain of fixed squarings and multiplications.
+ The 20-limb buf holds four 5-limb temporaries (a, t0, b, c); the
+ repeated re-derivation of the same pointers is an extraction artifact. */
+inline static void Hacl_Bignum_Crecip_crecip(uint64_t *out, uint64_t *z)
+{
+ uint64_t buf[20] = { 0 };
+ uint64_t *a = buf;
+ uint64_t *t00 = buf + (uint32_t )5;
+ uint64_t *b0 = buf + (uint32_t )10;
+ (void )(buf + (uint32_t )15);
+ Hacl_Bignum_Fsquare_fsquare_times(a, z, (uint32_t )1);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, a, (uint32_t )2);
+ Hacl_Bignum_Fmul_fmul(b0, t00, z);
+ Hacl_Bignum_Fmul_fmul(a, b0, a);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, a, (uint32_t )1);
+ Hacl_Bignum_Fmul_fmul(b0, t00, b0);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, b0, (uint32_t )5);
+ uint64_t *t01 = buf + (uint32_t )5;
+ uint64_t *b1 = buf + (uint32_t )10;
+ uint64_t *c0 = buf + (uint32_t )15;
+ Hacl_Bignum_Fmul_fmul(b1, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t )10);
+ Hacl_Bignum_Fmul_fmul(c0, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, c0, (uint32_t )20);
+ Hacl_Bignum_Fmul_fmul(t01, t01, c0);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t01, (uint32_t )10);
+ Hacl_Bignum_Fmul_fmul(b1, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t )50);
+ uint64_t *a0 = buf;
+ uint64_t *t0 = buf + (uint32_t )5;
+ uint64_t *b = buf + (uint32_t )10;
+ uint64_t *c = buf + (uint32_t )15;
+ Hacl_Bignum_Fmul_fmul(c, t0, b);
+ Hacl_Bignum_Fsquare_fsquare_times(t0, c, (uint32_t )100);
+ Hacl_Bignum_Fmul_fmul(t0, t0, c);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t )50);
+ Hacl_Bignum_Fmul_fmul(t0, t0, b);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t )5);
+ Hacl_Bignum_Fmul_fmul(out, t0, a0);
+}
+
+/* Limb-wise field addition a += b (no carry needed: limbs stay below
+ 64 bits; unrolled over the 5 limbs). */
+inline static void Hacl_Bignum_fsum(uint64_t *a, uint64_t *b)
+{
+ {
+ uint64_t uu____871 = a[0];
+ uint64_t uu____874 = b[0];
+ uint64_t uu____870 = uu____871 + uu____874;
+ a[0] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[1];
+ uint64_t uu____874 = b[1];
+ uint64_t uu____870 = uu____871 + uu____874;
+ a[1] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[2];
+ uint64_t uu____874 = b[2];
+ uint64_t uu____870 = uu____871 + uu____874;
+ a[2] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[3];
+ uint64_t uu____874 = b[3];
+ uint64_t uu____870 = uu____871 + uu____874;
+ a[3] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[4];
+ uint64_t uu____874 = b[4];
+ uint64_t uu____870 = uu____871 + uu____874;
+ a[4] = uu____870;
+ }
+}
+
+/* Field subtraction a = b - a. A fixed multiple of the prime p is first
+ added limb-wise to a copy of b (constants 0x3fffffffffff68,
+ 0x3ffffffffffff8) so every limb subtraction is non-negative —
+ no borrows, keeping the operation constant-time. */
+inline static void Hacl_Bignum_fdifference(uint64_t *a, uint64_t *b)
+{
+ uint64_t tmp[5] = { 0 };
+ memcpy(tmp, b, (uint32_t )5 * sizeof b[0]);
+ uint64_t b0 = tmp[0];
+ uint64_t b1 = tmp[1];
+ uint64_t b2 = tmp[2];
+ uint64_t b3 = tmp[3];
+ uint64_t b4 = tmp[4];
+ tmp[0] = b0 + (uint64_t )0x3fffffffffff68;
+ tmp[1] = b1 + (uint64_t )0x3ffffffffffff8;
+ tmp[2] = b2 + (uint64_t )0x3ffffffffffff8;
+ tmp[3] = b3 + (uint64_t )0x3ffffffffffff8;
+ tmp[4] = b4 + (uint64_t )0x3ffffffffffff8;
+ {
+ uint64_t uu____871 = a[0];
+ uint64_t uu____874 = tmp[0];
+ uint64_t uu____870 = uu____874 - uu____871;
+ a[0] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[1];
+ uint64_t uu____874 = tmp[1];
+ uint64_t uu____870 = uu____874 - uu____871;
+ a[1] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[2];
+ uint64_t uu____874 = tmp[2];
+ uint64_t uu____870 = uu____874 - uu____871;
+ a[2] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[3];
+ uint64_t uu____874 = tmp[3];
+ uint64_t uu____870 = uu____874 - uu____871;
+ a[3] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[4];
+ uint64_t uu____874 = tmp[4];
+ uint64_t uu____870 = uu____874 - uu____871;
+ a[4] = uu____870;
+ }
+}
+
+/* Scalar multiplication output = b * s mod p: widen-multiply each limb,
+ run the carry chain, fold the top-limb overflow (*19), then narrow. */
+inline static void Hacl_Bignum_fscalar(uint64_t *output, uint64_t *b, uint64_t s)
+{
+ KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0), (uint32_t )5);
+ FStar_UInt128_t tmp[5];
+ for (uintmax_t _i = 0; _i < (uint32_t )5; ++_i)
+ tmp[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
+ {
+ uint64_t uu____429 = b[0];
+ FStar_UInt128_t uu____428 = FStar_UInt128_mul_wide(uu____429, s);
+ tmp[0] = uu____428;
+ }
+ {
+ uint64_t uu____429 = b[1];
+ FStar_UInt128_t uu____428 = FStar_UInt128_mul_wide(uu____429, s);
+ tmp[1] = uu____428;
+ }
+ {
+ uint64_t uu____429 = b[2];
+ FStar_UInt128_t uu____428 = FStar_UInt128_mul_wide(uu____429, s);
+ tmp[2] = uu____428;
+ }
+ {
+ uint64_t uu____429 = b[3];
+ FStar_UInt128_t uu____428 = FStar_UInt128_mul_wide(uu____429, s);
+ tmp[3] = uu____428;
+ }
+ {
+ uint64_t uu____429 = b[4];
+ FStar_UInt128_t uu____428 = FStar_UInt128_mul_wide(uu____429, s);
+ tmp[4] = uu____428;
+ }
+ Hacl_Bignum_Fproduct_carry_wide_(tmp);
+ FStar_UInt128_t b4 = tmp[4];
+ FStar_UInt128_t b0 = tmp[0];
+ FStar_UInt128_t
+ mask =
+ FStar_UInt128_sub(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )1),
+ (uint32_t )51),
+ FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )1));
+ FStar_UInt128_t b4_ = FStar_UInt128_logand(b4, mask);
+ FStar_UInt128_t
+ b0_ =
+ FStar_UInt128_add(b0,
+ FStar_UInt128_mul_wide((uint64_t )19,
+ FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t )51))));
+ tmp[4] = b4_;
+ tmp[0] = b0_;
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
+}
+
+/* Thin wrapper: field multiplication output = a * b mod p. */
+inline static void Hacl_Bignum_fmul(uint64_t *output, uint64_t *a, uint64_t *b)
+{
+ Hacl_Bignum_Fmul_fmul(output, a, b);
+}
+
+/* Thin wrapper: field inversion output = input^-1 mod p. */
+inline static void Hacl_Bignum_crecip(uint64_t *output, uint64_t *input)
+{
+ Hacl_Bignum_Crecip_crecip(output, input);
+}
+
+/* Constant-time conditional swap of limb ctr-1 between a and b:
+ swap1 is an all-zeros or all-ones mask, so no secret-dependent branch. */
+static void
+Hacl_EC_Point_swap_conditional_step(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
+{
+ uint32_t i = ctr - (uint32_t )1;
+ uint64_t ai = a[i];
+ uint64_t bi = b[i];
+ uint64_t x = swap1 & (ai ^ bi);
+ uint64_t ai1 = ai ^ x;
+ uint64_t bi1 = bi ^ x;
+ a[i] = ai1;
+ b[i] = bi1;
+}
+
+/* Recursively apply the conditional-swap step to limbs ctr-1 .. 0. */
+static void
+Hacl_EC_Point_swap_conditional_(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
+{
+ if (ctr == (uint32_t )0)
+ {
+
+ }
+ else
+ {
+ Hacl_EC_Point_swap_conditional_step(a, b, swap1, ctr);
+ uint32_t i = ctr - (uint32_t )1;
+ Hacl_EC_Point_swap_conditional_(a, b, swap1, i);
+ }
+}
+
+/* Conditionally swap two projective points (x and z coordinates, 5 limbs
+ each). iswap is 0 or 1; 0 - iswap expands it to an all-bits mask. */
+static void Hacl_EC_Point_swap_conditional(uint64_t *a, uint64_t *b, uint64_t iswap)
+{
+ uint64_t swap1 = (uint64_t )0 - iswap;
+ Hacl_EC_Point_swap_conditional_(a, b, swap1, (uint32_t )5);
+ Hacl_EC_Point_swap_conditional_(a + (uint32_t )5, b + (uint32_t )5, swap1, (uint32_t )5);
+}
+
+/* Copy a projective point (x then z, 5 limbs each) from input to output. */
+static void Hacl_EC_Point_copy(uint64_t *output, uint64_t *input)
+{
+ memcpy(output, input, (uint32_t )5 * sizeof input[0]);
+ memcpy(output + (uint32_t )5,
+ input + (uint32_t )5,
+ (uint32_t )5 * sizeof (input + (uint32_t )5)[0]);
+}
+
+/* Combined Montgomery ladder step: point doubling p -> pp and
+ differential addition (p, pq, base-difference qmqp) -> ppq, on
+ projective (x:z) coordinates. 121665 = (A - 2) / 4 for curve25519.
+ The 40-limb buf holds eight 5-limb temporaries; the (void) statements
+ and repeated pointer re-derivations are F* extraction artifacts. */
+static void
+Hacl_EC_AddAndDouble_fmonty(
+ uint64_t *pp,
+ uint64_t *ppq,
+ uint64_t *p,
+ uint64_t *pq,
+ uint64_t *qmqp
+)
+{
+ uint64_t *qx = qmqp;
+ uint64_t *x2 = pp;
+ uint64_t *z2 = pp + (uint32_t )5;
+ uint64_t *x3 = ppq;
+ uint64_t *z3 = ppq + (uint32_t )5;
+ uint64_t *x = p;
+ uint64_t *z = p + (uint32_t )5;
+ uint64_t *xprime = pq;
+ uint64_t *zprime = pq + (uint32_t )5;
+ uint64_t buf[40] = { 0 };
+ (void )(buf + (uint32_t )5);
+ (void )(buf + (uint32_t )10);
+ (void )(buf + (uint32_t )15);
+ (void )(buf + (uint32_t )20);
+ (void )(buf + (uint32_t )25);
+ (void )(buf + (uint32_t )30);
+ (void )(buf + (uint32_t )35);
+ uint64_t *origx = buf;
+ uint64_t *origxprime = buf + (uint32_t )5;
+ (void )(buf + (uint32_t )10);
+ (void )(buf + (uint32_t )15);
+ (void )(buf + (uint32_t )20);
+ uint64_t *xxprime0 = buf + (uint32_t )25;
+ uint64_t *zzprime0 = buf + (uint32_t )30;
+ (void )(buf + (uint32_t )35);
+ memcpy(origx, x, (uint32_t )5 * sizeof x[0]);
+ Hacl_Bignum_fsum(x, z);
+ Hacl_Bignum_fdifference(z, origx);
+ memcpy(origxprime, xprime, (uint32_t )5 * sizeof xprime[0]);
+ Hacl_Bignum_fsum(xprime, zprime);
+ Hacl_Bignum_fdifference(zprime, origxprime);
+ Hacl_Bignum_fmul(xxprime0, xprime, z);
+ Hacl_Bignum_fmul(zzprime0, x, zprime);
+ uint64_t *origxprime0 = buf + (uint32_t )5;
+ (void )(buf + (uint32_t )10);
+ uint64_t *xx0 = buf + (uint32_t )15;
+ uint64_t *zz0 = buf + (uint32_t )20;
+ uint64_t *xxprime = buf + (uint32_t )25;
+ uint64_t *zzprime = buf + (uint32_t )30;
+ uint64_t *zzzprime = buf + (uint32_t )35;
+ memcpy(origxprime0, xxprime, (uint32_t )5 * sizeof xxprime[0]);
+ Hacl_Bignum_fsum(xxprime, zzprime);
+ Hacl_Bignum_fdifference(zzprime, origxprime0);
+ Hacl_Bignum_Fsquare_fsquare_times(x3, xxprime, (uint32_t )1);
+ Hacl_Bignum_Fsquare_fsquare_times(zzzprime, zzprime, (uint32_t )1);
+ Hacl_Bignum_fmul(z3, zzzprime, qx);
+ Hacl_Bignum_Fsquare_fsquare_times(xx0, x, (uint32_t )1);
+ Hacl_Bignum_Fsquare_fsquare_times(zz0, z, (uint32_t )1);
+ (void )(buf + (uint32_t )5);
+ uint64_t *zzz = buf + (uint32_t )10;
+ uint64_t *xx = buf + (uint32_t )15;
+ uint64_t *zz = buf + (uint32_t )20;
+ (void )(buf + (uint32_t )25);
+ (void )(buf + (uint32_t )30);
+ (void )(buf + (uint32_t )35);
+ Hacl_Bignum_fmul(x2, xx, zz);
+ Hacl_Bignum_fdifference(zz, xx);
+ uint64_t scalar = (uint64_t )121665;
+ Hacl_Bignum_fscalar(zzz, zz, scalar);
+ Hacl_Bignum_fsum(zzz, xx);
+ Hacl_Bignum_fmul(z2, zzz, zz);
+}
+
+/* Ladder step, part 1: take bit 7 of byt (so bit is 0 or 1) and
+   conditionally swap the points nq and nqpq on it.
+   nq2, nqpq2 and q are unused here; the signature mirrors the other
+   step_ helpers so they compose uniformly. */
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step_1(
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint8_t byt
+)
+{
+ uint64_t bit = (uint64_t )(byt >> (uint32_t )7);
+ Hacl_EC_Point_swap_conditional(nq, nqpq, bit);
+}
+
+/* Ladder step, part 2: one Montgomery "add and double" (fmonty) computing
+   (nq2, nqpq2) from (nq, nqpq) and the base point q.  byt is unused; the
+   signature mirrors the other step_ helpers. */
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step_2(
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint8_t byt
+)
+{
+ Hacl_EC_AddAndDouble_fmonty(nq2, nqpq2, nq, nqpq, q);
+}
+
+/* One full Montgomery-ladder bit: conditionally swap the inputs on the
+   current bit (bit 7 of byt), run the add-and-double into (nq2, nqpq2),
+   then conditionally swap the outputs back on the same bit. */
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint8_t byt
+)
+{
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step_1(nq, nqpq, nq2, nqpq2, q, byt);
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step_2(nq, nqpq, nq2, nqpq2, q, byt);
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step_1(nq2, nqpq2, nq, nqpq, q, byt);
+}
+
+/* Process two scalar bits: the MSB of byt, then (byt << 1 brings the next
+   bit into position 7) the following bit.  The second call swaps the roles
+   of (nq, nqpq) and (nq2, nqpq2), so after two steps the result is back in
+   (nq, nqpq). */
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint8_t byt
+)
+{
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
+ uint8_t byt1 = byt << (uint32_t )1;
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
+}
+
+/* Consume 2*i bits of byt, most significant first: i double-steps, shifting
+   byt left by 2 between recursive calls.  Called with i == 4 to process one
+   whole scalar byte (see the big loop below).  Tail recursion expresses the
+   loop in this generated code. */
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop(
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint8_t byt,
+ uint32_t i
+)
+{
+ if (i == (uint32_t )0)
+ {
+
+ }
+ else
+ {
+ uint32_t i_ = i - (uint32_t )1;
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(nq, nqpq, nq2, nqpq2, q, byt);
+ uint8_t byt_ = byt << (uint32_t )2;
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byt_, i_);
+ }
+}
+
+/* Walk the scalar bytes n1[i-1] down to n1[0] (i.e. from the last byte of
+   the little-endian scalar toward the first, so most significant bits are
+   consumed first).  Each byte is fed to the small loop as 4 double-steps
+   (8 bits). */
+static void
+Hacl_EC_Ladder_BigLoop_cmult_big_loop(
+ uint8_t *n1,
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint32_t i
+)
+{
+ if (i == (uint32_t )0)
+ {
+
+ }
+ else
+ {
+ uint32_t i1 = i - (uint32_t )1;
+ uint8_t byte = n1[i1];
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byte, (uint32_t )4);
+ Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, i1);
+ }
+}
+
+/* Montgomery ladder over a 32-byte scalar n1.  point_buf holds four
+   projective points of 10 limbs each ((x, z), 5 limbs apiece):
+   nq, nqpq and the two scratch points nq2/nqpq2.  The ladder starts from
+   nq = (1, 0) (point at infinity) and nqpq = q; the result is copied into
+   `result`. */
+static void
+Hacl_EC_Ladder_cmult_(uint64_t *result, uint64_t *point_buf, uint8_t *n1, uint64_t *q)
+{
+ uint64_t *nq = point_buf;
+ uint64_t *nqpq = point_buf + (uint32_t )10;
+ uint64_t *nq2 = point_buf + (uint32_t )20;
+ uint64_t *nqpq2 = point_buf + (uint32_t )30;
+ Hacl_EC_Point_copy(nqpq, q);
+ nq[0] = (uint64_t )1;
+ Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, (uint32_t )32);
+ Hacl_EC_Point_copy(result, nq);
+}
+
+/* Scalar multiplication result = n1 * q, using a zero-initialized stack
+   working buffer for the four ladder points. */
+static void Hacl_EC_Ladder_cmult(uint64_t *result, uint8_t *n1, uint64_t *q)
+{
+ uint64_t point_buf[40] = { 0 };
+ Hacl_EC_Ladder_cmult_(result, point_buf, n1, q);
+}
+
+/* Store five limbs into output[0..4].  (The generated code also emits an
+   identical trailing-underscore duplicate below.) */
+static void
+Hacl_EC_Format_upd_5(
+ uint64_t *output,
+ uint64_t output0,
+ uint64_t output1,
+ uint64_t output2,
+ uint64_t output3,
+ uint64_t output4
+)
+{
+ output[0] = output0;
+ output[1] = output1;
+ output[2] = output2;
+ output[3] = output3;
+ output[4] = output4;
+}
+
+/* Identical duplicate of Hacl_EC_Format_upd_5, kept as emitted by the
+   KreMLin extraction: store five limbs into output[0..4]. */
+static void
+Hacl_EC_Format_upd_5_(
+ uint64_t *output,
+ uint64_t output0,
+ uint64_t output1,
+ uint64_t output2,
+ uint64_t output3,
+ uint64_t output4
+)
+{
+ output[0] = output0;
+ output[1] = output1;
+ output[2] = output2;
+ output[3] = output3;
+ output[4] = output4;
+}
+
+/* Unpack a 32-byte little-endian field element into 5 limbs of 51 bits.
+   mask_511 is 2^51 - 1 (the name is a generated-code artifact).  The five
+   64-bit loads start at byte offsets 0, 6, 12, 19, 24 and are right-shifted
+   by 0, 3, 6, 1, 12 bits respectively, so each extraction begins exactly at
+   bit 51*i of the input; the last load covers bytes 24..31. */
+static void Hacl_EC_Format_fexpand(uint64_t *output, uint8_t *input)
+{
+ uint64_t mask_511 = (uint64_t )0x7ffffffffffff;
+ uint64_t i0 = load64_le(input);
+ uint8_t *x00 = input + (uint32_t )6;
+ uint64_t i1 = load64_le(x00);
+ uint8_t *x01 = input + (uint32_t )12;
+ uint64_t i2 = load64_le(x01);
+ uint8_t *x02 = input + (uint32_t )19;
+ uint64_t i3 = load64_le(x02);
+ uint8_t *x0 = input + (uint32_t )24;
+ uint64_t i4 = load64_le(x0);
+ uint64_t output0 = i0 & mask_511;
+ uint64_t output1 = i1 >> (uint32_t )3 & mask_511;
+ uint64_t output2 = i2 >> (uint32_t )6 & mask_511;
+ uint64_t output3 = i3 >> (uint32_t )1 & mask_511;
+ uint64_t output4 = i4 >> (uint32_t )12 & mask_511;
+ Hacl_EC_Format_upd_5(output, output0, output1, output2, output3, output4);
+}
+
+/* Write four 64-bit words little-endian into the 32-byte output buffer. */
+static void
+Hacl_EC_Format_store_4(uint8_t *output, uint64_t v0, uint64_t v1, uint64_t v2, uint64_t v3)
+{
+ uint8_t *b0 = output;
+ uint8_t *b1 = output + (uint32_t )8;
+ uint8_t *b2 = output + (uint32_t )16;
+ uint8_t *b3 = output + (uint32_t )24;
+ store64_le(b0, v0);
+ store64_le(b1, v1);
+ store64_le(b2, v2);
+ store64_le(b3, v3);
+}
+
+/* One carry pass over the 5 limbs: propagate each limb's bits above 51 into
+   the next limb and mask limbs 0..3 to 51 bits.  Limb 4's overflow is left
+   in place for the subsequent carry_top. */
+static void Hacl_EC_Format_fcontract_first_carry_pass(uint64_t *input)
+{
+ uint64_t t0 = input[0];
+ uint64_t t1 = input[1];
+ uint64_t t2 = input[2];
+ uint64_t t3 = input[3];
+ uint64_t t4 = input[4];
+ uint64_t t1_ = t1 + (t0 >> (uint32_t )51);
+ uint64_t t0_ = t0 & (uint64_t )0x7ffffffffffff;
+ uint64_t t2_ = t2 + (t1_ >> (uint32_t )51);
+ uint64_t t1__ = t1_ & (uint64_t )0x7ffffffffffff;
+ uint64_t t3_ = t3 + (t2_ >> (uint32_t )51);
+ uint64_t t2__ = t2_ & (uint64_t )0x7ffffffffffff;
+ uint64_t t4_ = t4 + (t3_ >> (uint32_t )51);
+ uint64_t t3__ = t3_ & (uint64_t )0x7ffffffffffff;
+ Hacl_EC_Format_upd_5_(input, t0_, t1__, t2__, t3__, t4_);
+}
+
+/* Carry pass followed by carry_top, which folds limb 4's overflow back into
+   limb 0 (modular reduction of the top bits). */
+static void Hacl_EC_Format_fcontract_first_carry_full(uint64_t *input)
+{
+ Hacl_EC_Format_fcontract_first_carry_pass(input);
+ Hacl_Bignum_Modulo_carry_top(input);
+}
+
+/* Second carry pass; identical in structure to the first pass (generated
+   code keeps them as separate functions). */
+static void Hacl_EC_Format_fcontract_second_carry_pass(uint64_t *input)
+{
+ uint64_t t0 = input[0];
+ uint64_t t1 = input[1];
+ uint64_t t2 = input[2];
+ uint64_t t3 = input[3];
+ uint64_t t4 = input[4];
+ uint64_t t1_ = t1 + (t0 >> (uint32_t )51);
+ uint64_t t0_ = t0 & (uint64_t )0x7ffffffffffff;
+ uint64_t t2_ = t2 + (t1_ >> (uint32_t )51);
+ uint64_t t1__ = t1_ & (uint64_t )0x7ffffffffffff;
+ uint64_t t3_ = t3 + (t2_ >> (uint32_t )51);
+ uint64_t t2__ = t2_ & (uint64_t )0x7ffffffffffff;
+ uint64_t t4_ = t4 + (t3_ >> (uint32_t )51);
+ uint64_t t3__ = t3_ & (uint64_t )0x7ffffffffffff;
+ Hacl_EC_Format_upd_5_(input, t0_, t1__, t2__, t3__, t4_);
+}
+
+/* Second carry pass + carry_top, then one last carry from limb 0 into
+   limb 1 so limb 0 is strictly below 2^51. */
+static void Hacl_EC_Format_fcontract_second_carry_full(uint64_t *input)
+{
+ Hacl_EC_Format_fcontract_second_carry_pass(input);
+ Hacl_Bignum_Modulo_carry_top(input);
+ uint64_t i0 = input[0];
+ uint64_t i1 = input[1];
+ uint64_t i0_ = i0 & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t )51);
+ input[0] = i0_;
+ input[1] = i1_;
+}
+
+/* Final canonicalization: branch-free subtraction of the field prime
+   p = 2^255 - 19 when the value is >= p.  The per-limb constants are
+   2^51 - 19 (limb 0) and 2^51 - 1 (limbs 1..4); `mask` is all-ones exactly
+   when every limb comparison holds, so the subtraction is applied without
+   data-dependent branches. */
+static void Hacl_EC_Format_fcontract_trim(uint64_t *input)
+{
+ uint64_t a0 = input[0];
+ uint64_t a1 = input[1];
+ uint64_t a2 = input[2];
+ uint64_t a3 = input[3];
+ uint64_t a4 = input[4];
+ uint64_t mask0 = FStar_UInt64_gte_mask(a0, (uint64_t )0x7ffffffffffed);
+ uint64_t mask1 = FStar_UInt64_eq_mask(a1, (uint64_t )0x7ffffffffffff);
+ uint64_t mask2 = FStar_UInt64_eq_mask(a2, (uint64_t )0x7ffffffffffff);
+ uint64_t mask3 = FStar_UInt64_eq_mask(a3, (uint64_t )0x7ffffffffffff);
+ uint64_t mask4 = FStar_UInt64_eq_mask(a4, (uint64_t )0x7ffffffffffff);
+ uint64_t mask = mask0 & mask1 & mask2 & mask3 & mask4;
+ uint64_t a0_ = a0 - ((uint64_t )0x7ffffffffffed & mask);
+ uint64_t a1_ = a1 - ((uint64_t )0x7ffffffffffff & mask);
+ uint64_t a2_ = a2 - ((uint64_t )0x7ffffffffffff & mask);
+ uint64_t a3_ = a3 - ((uint64_t )0x7ffffffffffff & mask);
+ uint64_t a4_ = a4 - ((uint64_t )0x7ffffffffffff & mask);
+ Hacl_EC_Format_upd_5_(input, a0_, a1_, a2_, a3_, a4_);
+}
+
+/* Pack five 51-bit limbs into four 64-bit words (5*51 = 255 bits) and
+   store them little-endian.  Each word splices the low bits of one limb
+   above the remaining high bits of the previous one. */
+static void Hacl_EC_Format_fcontract_store(uint8_t *output, uint64_t *input)
+{
+ uint64_t t0 = input[0];
+ uint64_t t1 = input[1];
+ uint64_t t2 = input[2];
+ uint64_t t3 = input[3];
+ uint64_t t4 = input[4];
+ uint64_t o0 = t1 << (uint32_t )51 | t0;
+ uint64_t o1 = t2 << (uint32_t )38 | t1 >> (uint32_t )13;
+ uint64_t o2 = t3 << (uint32_t )25 | t2 >> (uint32_t )26;
+ uint64_t o3 = t4 << (uint32_t )12 | t3 >> (uint32_t )39;
+ Hacl_EC_Format_store_4(output, o0, o1, o2, o3);
+}
+
+/* Serialize a field element: two full carry rounds, canonical reduction
+   below the prime (trim), then byte packing.  Mutates `input` in place. */
+static void Hacl_EC_Format_fcontract(uint8_t *output, uint64_t *input)
+{
+ Hacl_EC_Format_fcontract_first_carry_full(input);
+ Hacl_EC_Format_fcontract_second_carry_full(input);
+ Hacl_EC_Format_fcontract_trim(input);
+ Hacl_EC_Format_fcontract_store(output, input);
+}
+
+/* Convert a projective point (x : z) to its affine x-coordinate and
+   serialize it: sc = x * z^(-1), then fcontract into the 32-byte output. */
+static void Hacl_EC_Format_scalar_of_point(uint8_t *scalar, uint64_t *point)
+{
+ uint64_t *x = point;
+ uint64_t *z = point + (uint32_t )5;
+ uint64_t buf[10] = { 0 };
+ uint64_t *zmone = buf;
+ uint64_t *sc = buf + (uint32_t )5;
+ Hacl_Bignum_crecip(zmone, z);
+ Hacl_Bignum_fmul(sc, x, zmone);
+ Hacl_EC_Format_fcontract(scalar, sc);
+}
+
+/* Run the ladder on the (already clamped) scalar and the expanded point q,
+   then serialize the resulting x-coordinate into mypublic.
+   NOTE(review): `basepoint` is unused here — the expanded point arrives via
+   q.  The x[0] = 1 initialization appears redundant since cmult overwrites
+   nq with its result; kept as emitted by the verified extraction. */
+static void
+Hacl_EC_crypto_scalarmult__(
+ uint8_t *mypublic,
+ uint8_t *scalar,
+ uint8_t *basepoint,
+ uint64_t *q
+)
+{
+ uint64_t buf[15] = { 0 };
+ uint64_t *nq = buf;
+ uint64_t *x = nq;
+ (void )(nq + (uint32_t )5);
+ (void )(buf + (uint32_t )5);
+ x[0] = (uint64_t )1;
+ Hacl_EC_Ladder_cmult(nq, scalar, q);
+ Hacl_EC_Format_scalar_of_point(mypublic, nq);
+}
+
+/* Clamp the secret per X25519 before the ladder: clear the low 3 bits of
+   byte 0 (& 248), clear the top bit of byte 31 (& 127) and set bit 254
+   (| 64).  Works on a local copy so the caller's secret is untouched. */
+static void
+Hacl_EC_crypto_scalarmult_(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint, uint64_t *q)
+{
+ uint8_t e[32] = { 0 };
+ memcpy(e, secret, (uint32_t )32 * sizeof secret[0]);
+ uint8_t e0 = e[0];
+ uint8_t e31 = e[31];
+ uint8_t e01 = e0 & (uint8_t )248;
+ uint8_t e311 = e31 & (uint8_t )127;
+ uint8_t e312 = e311 | (uint8_t )64;
+ e[0] = e01;
+ e[31] = e312;
+ uint8_t *scalar = e;
+ Hacl_EC_crypto_scalarmult__(mypublic, scalar, basepoint, q);
+}
+
+/* Public X25519 entry point: expand the 32-byte basepoint into projective
+   coordinates (x from fexpand, z = 1), then clamp-and-multiply.
+   mypublic receives the 32-byte shared/public value. */
+void Hacl_EC_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint)
+{
+ uint64_t buf[10] = { 0 };
+ uint64_t *x = buf;
+ uint64_t *z = buf + (uint32_t )5;
+ Hacl_EC_Format_fexpand(x, basepoint);
+ z[0] = (uint64_t )1;
+ uint64_t *q = buf;
+ Hacl_EC_crypto_scalarmult_(mypublic, secret, basepoint, q);
+}
+
+/* Extraction artifact of an F* specification (ghost) function; both
+   parameters are unused and the returned constant carries no run-time
+   meaning.  Not intended to be called by application code. */
+void *Curve25519_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *b)
+{
+ return (void *)(uint8_t )0;
+}
+
+/* Thin public alias for Hacl_EC_crypto_scalarmult (X25519). */
+void Curve25519_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint)
+{
+ Hacl_EC_crypto_scalarmult(mypublic, secret, basepoint);
+}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/Curve25519.h b/sw/airborne/modules/datalink/hacl-c/Curve25519.h
new file mode 100644
index 0000000000..5ebe3a8995
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Curve25519.h
@@ -0,0 +1,49 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __Curve25519_H
+#define __Curve25519_H
+
+
+
+#include "testlib.h"
+
+/* The typedefs below are erased HACL*/F* specification types kept by the
+   extraction; only the two crypto_scalarmult functions form the usable
+   public API of this header. */
+typedef uint64_t Hacl_Bignum_Constants_limb;
+
+typedef FStar_UInt128_t Hacl_Bignum_Constants_wide;
+
+typedef uint64_t Hacl_Bignum_Parameters_limb;
+
+typedef FStar_UInt128_t Hacl_Bignum_Parameters_wide;
+
+typedef uint32_t Hacl_Bignum_Parameters_ctr;
+
+typedef uint64_t *Hacl_Bignum_Parameters_felem;
+
+typedef FStar_UInt128_t *Hacl_Bignum_Parameters_felem_wide;
+
+typedef void *Hacl_Bignum_Parameters_seqelem;
+
+typedef void *Hacl_Bignum_Parameters_seqelem_wide;
+
+typedef FStar_UInt128_t Hacl_Bignum_Wide_t;
+
+typedef uint64_t Hacl_Bignum_Limb_t;
+
+/* Proof-level lemma; presumably has no extracted body — do not call. */
+extern void Hacl_Bignum_lemma_diff(Prims_int x0, Prims_int x1, Prims_pos x2);
+
+typedef uint64_t *Hacl_EC_Point_point;
+
+typedef uint8_t *Hacl_EC_Ladder_SmallLoop_uint8_p;
+
+typedef uint8_t *Hacl_EC_Ladder_uint8_p;
+
+typedef uint8_t *Hacl_EC_Format_uint8_p;
+
+/* X25519 scalar multiplication: mypublic = secret * basepoint (32 bytes each). */
+void Hacl_EC_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint);
+
+typedef uint8_t *Curve25519_uint8_p;
+
+void *Curve25519_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *b);
+
+/* Public alias of Hacl_EC_crypto_scalarmult. */
+void Curve25519_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/Ed25519.c b/sw/airborne/modules/datalink/hacl-c/Ed25519.c
new file mode 100644
index 0000000000..4cad6727ad
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Ed25519.c
@@ -0,0 +1,2987 @@
+#include "Ed25519.h"
+
+/* Store five 64-bit limbs into b[0..4]. */
+static void
+Hacl_Lib_Create64_make_h64_5(
+ uint64_t *b,
+ uint64_t s0,
+ uint64_t s1,
+ uint64_t s2,
+ uint64_t s3,
+ uint64_t s4
+)
+{
+ b[0] = s0;
+ b[1] = s1;
+ b[2] = s2;
+ b[3] = s3;
+ b[4] = s4;
+}
+
+/* Store ten 64-bit limbs into b[0..9] (e.g. two 5-limb field elements). */
+static void
+Hacl_Lib_Create64_make_h64_10(
+ uint64_t *b,
+ uint64_t s0,
+ uint64_t s1,
+ uint64_t s2,
+ uint64_t s3,
+ uint64_t s4,
+ uint64_t s5,
+ uint64_t s6,
+ uint64_t s7,
+ uint64_t s8,
+ uint64_t s9
+)
+{
+ b[0] = s0;
+ b[1] = s1;
+ b[2] = s2;
+ b[3] = s3;
+ b[4] = s4;
+ b[5] = s5;
+ b[6] = s6;
+ b[7] = s7;
+ b[8] = s8;
+ b[9] = s9;
+}
+
+/* Fold the overflow of the top limb back into limb 0: b4 is masked to
+   51 bits and its excess, times 19, is added to b0 — the reduction
+   2^255 ≡ 19 for the field modulo 2^255 - 19. */
+static void Hacl_Bignum_Modulo_carry_top(uint64_t *b)
+{
+ uint64_t b4 = b[4];
+ uint64_t b0 = b[0];
+ uint64_t mask = ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ uint64_t b4_ = b4 & mask;
+ uint64_t b0_ = b0 + (uint64_t )19 * (b4 >> (uint32_t )51);
+ b[4] = b4_;
+ b[0] = b0_;
+}
+
+/* Truncate five 128-bit limbs to their low 64 bits into output[0..4]
+   (fully unrolled; valid once carries have bounded each limb). */
+inline static void
+Hacl_Bignum_Fproduct_copy_from_wide_(uint64_t *output, FStar_UInt128_t *input)
+{
+ {
+ FStar_UInt128_t uu____429 = input[0];
+ uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
+ output[0] = uu____428;
+ }
+ {
+ FStar_UInt128_t uu____429 = input[1];
+ uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
+ output[1] = uu____428;
+ }
+ {
+ FStar_UInt128_t uu____429 = input[2];
+ uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
+ output[2] = uu____428;
+ }
+ {
+ FStar_UInt128_t uu____429 = input[3];
+ uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
+ output[3] = uu____428;
+ }
+ {
+ FStar_UInt128_t uu____429 = input[4];
+ uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
+ output[4] = uu____428;
+ }
+}
+
+/* Rotate the five limbs up by one position: output[i] = output[i-1] for
+   i = 4..1, and the old top limb wraps around into output[0]
+   (unrolled loop, counting down to avoid overwriting sources). */
+inline static void Hacl_Bignum_Fproduct_shift(uint64_t *output)
+{
+ uint64_t tmp = output[4];
+ {
+ uint32_t ctr = (uint32_t )5 - (uint32_t )0 - (uint32_t )1;
+ uint64_t z = output[ctr - (uint32_t )1];
+ output[ctr] = z;
+ }
+ {
+ uint32_t ctr = (uint32_t )5 - (uint32_t )1 - (uint32_t )1;
+ uint64_t z = output[ctr - (uint32_t )1];
+ output[ctr] = z;
+ }
+ {
+ uint32_t ctr = (uint32_t )5 - (uint32_t )2 - (uint32_t )1;
+ uint64_t z = output[ctr - (uint32_t )1];
+ output[ctr] = z;
+ }
+ {
+ uint32_t ctr = (uint32_t )5 - (uint32_t )3 - (uint32_t )1;
+ uint64_t z = output[ctr - (uint32_t )1];
+ output[ctr] = z;
+ }
+ output[0] = tmp;
+}
+
+/* Multiply-accumulate: output[i] += input[i] * s for i = 0..4, with 64x64
+   -> 128-bit products accumulated into the wide limbs (unrolled). */
+inline static void
+Hacl_Bignum_Fproduct_sum_scalar_multiplication_(
+ FStar_UInt128_t *output,
+ uint64_t *input,
+ uint64_t s
+)
+{
+ {
+ FStar_UInt128_t uu____871 = output[0];
+ uint64_t uu____874 = input[0];
+ FStar_UInt128_t
+ uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
+ output[0] = uu____870;
+ }
+ {
+ FStar_UInt128_t uu____871 = output[1];
+ uint64_t uu____874 = input[1];
+ FStar_UInt128_t
+ uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
+ output[1] = uu____870;
+ }
+ {
+ FStar_UInt128_t uu____871 = output[2];
+ uint64_t uu____874 = input[2];
+ FStar_UInt128_t
+ uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
+ output[2] = uu____870;
+ }
+ {
+ FStar_UInt128_t uu____871 = output[3];
+ uint64_t uu____874 = input[3];
+ FStar_UInt128_t
+ uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
+ output[3] = uu____870;
+ }
+ {
+ FStar_UInt128_t uu____871 = output[4];
+ uint64_t uu____874 = input[4];
+ FStar_UInt128_t
+ uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
+ output[4] = uu____870;
+ }
+}
+
+/* Carry propagation over the wide limbs: for ctr = 0..3, keep the low
+   51 bits of tmp[ctr] and add the remainder into tmp[ctr+1] (unrolled;
+   the top limb's overflow is handled separately by the caller). */
+inline static void Hacl_Bignum_Fproduct_carry_wide_(FStar_UInt128_t *tmp)
+{
+ {
+ uint32_t ctr = (uint32_t )0;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t )1];
+ uint64_t
+ r0 =
+ FStar_Int_Cast_Full_uint128_to_uint64(tctr)
+ & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t )51);
+ tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t )1] = FStar_UInt128_add(tctrp1, c);
+ }
+ {
+ uint32_t ctr = (uint32_t )1;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t )1];
+ uint64_t
+ r0 =
+ FStar_Int_Cast_Full_uint128_to_uint64(tctr)
+ & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t )51);
+ tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t )1] = FStar_UInt128_add(tctrp1, c);
+ }
+ {
+ uint32_t ctr = (uint32_t )2;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t )1];
+ uint64_t
+ r0 =
+ FStar_Int_Cast_Full_uint128_to_uint64(tctr)
+ & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t )51);
+ tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t )1] = FStar_UInt128_add(tctrp1, c);
+ }
+ {
+ uint32_t ctr = (uint32_t )3;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t )1];
+ uint64_t
+ r0 =
+ FStar_Int_Cast_Full_uint128_to_uint64(tctr)
+ & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t )51);
+ tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t )1] = FStar_UInt128_add(tctrp1, c);
+ }
+}
+
+/* Multiply by 2^51 modulo the prime: rotate the limbs up by one, then
+   scale the wrapped-around limb by 19 (2^255 ≡ 19 mod 2^255 - 19). */
+inline static void Hacl_Bignum_Fmul_shift_reduce(uint64_t *output)
+{
+ Hacl_Bignum_Fproduct_shift(output);
+ uint64_t b0 = output[0];
+ output[0] = (uint64_t )19 * b0;
+}
+
+/* Schoolbook multiplication with interleaved reduction: for each limb of
+   input21, accumulate input * input2i into the wide output, then rotate-
+   and-reduce input (except after the last limb).  NOTE: this clobbers
+   `input`; Hacl_Bignum_Fmul_fmul passes a copy for that reason. */
+static void
+Hacl_Bignum_Fmul_mul_shift_reduce_(FStar_UInt128_t *output, uint64_t *input, uint64_t *input21)
+{
+ {
+ uint64_t input2i = input21[0];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ {
+ uint64_t input2i = input21[1];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ {
+ uint64_t input2i = input21[2];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ {
+ uint64_t input2i = input21[3];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ uint32_t i = (uint32_t )4;
+ uint64_t input2i = input21[i];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+}
+
+/* Field multiplication core: wide multiply-and-reduce into a temporary,
+   carry the wide limbs, fold the top-limb overflow back into limb 0
+   (times 19), truncate to 64-bit limbs, and do one final carry from
+   limb 0 to limb 1.  Clobbers `input` (see mul_shift_reduce_). */
+inline static void Hacl_Bignum_Fmul_fmul_(uint64_t *output, uint64_t *input, uint64_t *input21)
+{
+ KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0), (uint32_t )5);
+ FStar_UInt128_t t[5];
+ for (uintmax_t _i = 0; _i < (uint32_t )5; ++_i)
+ t[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
+ Hacl_Bignum_Fmul_mul_shift_reduce_(t, input, input21);
+ Hacl_Bignum_Fproduct_carry_wide_(t);
+ FStar_UInt128_t b4 = t[4];
+ FStar_UInt128_t b0 = t[0];
+ FStar_UInt128_t
+ mask =
+ FStar_UInt128_sub(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )1),
+ (uint32_t )51),
+ FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )1));
+ FStar_UInt128_t b4_ = FStar_UInt128_logand(b4, mask);
+ FStar_UInt128_t
+ b0_ =
+ FStar_UInt128_add(b0,
+ FStar_UInt128_mul_wide((uint64_t )19,
+ FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t )51))));
+ t[4] = b4_;
+ t[0] = b0_;
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, t);
+ uint64_t i0 = output[0];
+ uint64_t i1 = output[1];
+ uint64_t i0_ = i0 & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t )51);
+ output[0] = i0_;
+ output[1] = i1_;
+}
+
+/* Field multiplication output = input * input21.  Copies `input` into a
+   temporary first because the core routine destroys its second argument;
+   this also makes output/input aliasing safe. */
+inline static void Hacl_Bignum_Fmul_fmul(uint64_t *output, uint64_t *input, uint64_t *input21)
+{
+ uint64_t tmp[5] = { 0 };
+ memcpy(tmp, input, (uint32_t )5 * sizeof input[0]);
+ Hacl_Bignum_Fmul_fmul_(output, tmp, input21);
+}
+
+/* Store five wide (128-bit) limbs into tmp[0..4]. */
+inline static void
+Hacl_Bignum_Fsquare_upd_5(
+ FStar_UInt128_t *tmp,
+ FStar_UInt128_t s0,
+ FStar_UInt128_t s1,
+ FStar_UInt128_t s2,
+ FStar_UInt128_t s3,
+ FStar_UInt128_t s4
+)
+{
+ tmp[0] = s0;
+ tmp[1] = s1;
+ tmp[2] = s2;
+ tmp[3] = s3;
+ tmp[4] = s4;
+}
+
+/* Squaring core: compute the wide limbs of output^2 with cross terms
+   doubled (d0, d1, d2, d4) and the factor 19 folded into the terms that
+   wrap past limb 4 (reduction mod 2^255 - 19). */
+inline static void Hacl_Bignum_Fsquare_fsquare__(FStar_UInt128_t *tmp, uint64_t *output)
+{
+ uint64_t r0 = output[0];
+ uint64_t r1 = output[1];
+ uint64_t r2 = output[2];
+ uint64_t r3 = output[3];
+ uint64_t r4 = output[4];
+ uint64_t d0 = r0 * (uint64_t )2;
+ uint64_t d1 = r1 * (uint64_t )2;
+ uint64_t d2 = r2 * (uint64_t )2 * (uint64_t )19;
+ uint64_t d419 = r4 * (uint64_t )19;
+ uint64_t d4 = d419 * (uint64_t )2;
+ FStar_UInt128_t
+ s0 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(r0, r0),
+ FStar_UInt128_mul_wide(d4, r1)),
+ FStar_UInt128_mul_wide(d2, r3));
+ FStar_UInt128_t
+ s1 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r1),
+ FStar_UInt128_mul_wide(d4, r2)),
+ FStar_UInt128_mul_wide(r3 * (uint64_t )19, r3));
+ FStar_UInt128_t
+ s2 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r2),
+ FStar_UInt128_mul_wide(r1, r1)),
+ FStar_UInt128_mul_wide(d4, r3));
+ FStar_UInt128_t
+ s3 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r3),
+ FStar_UInt128_mul_wide(d1, r2)),
+ FStar_UInt128_mul_wide(r4, d419));
+ FStar_UInt128_t
+ s4 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r4),
+ FStar_UInt128_mul_wide(d1, r3)),
+ FStar_UInt128_mul_wide(r2, r2));
+ Hacl_Bignum_Fsquare_upd_5(tmp, s0, s1, s2, s3, s4);
+}
+
+/* Field squaring: wide square, carry the wide limbs, fold the top-limb
+   overflow back into limb 0 (times 19), truncate into `output`, and do a
+   final limb0 -> limb1 carry.  Same reduction tail as Fmul_fmul_. */
+inline static void Hacl_Bignum_Fsquare_fsquare_(FStar_UInt128_t *tmp, uint64_t *output)
+{
+ Hacl_Bignum_Fsquare_fsquare__(tmp, output);
+ Hacl_Bignum_Fproduct_carry_wide_(tmp);
+ FStar_UInt128_t b4 = tmp[4];
+ FStar_UInt128_t b0 = tmp[0];
+ FStar_UInt128_t
+ mask =
+ FStar_UInt128_sub(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )1),
+ (uint32_t )51),
+ FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )1));
+ FStar_UInt128_t b4_ = FStar_UInt128_logand(b4, mask);
+ FStar_UInt128_t
+ b0_ =
+ FStar_UInt128_add(b0,
+ FStar_UInt128_mul_wide((uint64_t )19,
+ FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t )51))));
+ tmp[4] = b4_;
+ tmp[0] = b0_;
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
+ uint64_t i0 = output[0];
+ uint64_t i1 = output[1];
+ uint64_t i0_ = i0 & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t )51);
+ output[0] = i0_;
+ output[1] = i1_;
+}
+
+/* Square `input` in place count1 times (one unconditional squaring plus
+   count1 - 1 loop iterations; callers pass count1 >= 1). */
+static void
+Hacl_Bignum_Fsquare_fsquare_times_(uint64_t *input, FStar_UInt128_t *tmp, uint32_t count1)
+{
+ Hacl_Bignum_Fsquare_fsquare_(tmp, input);
+ for (uint32_t i = (uint32_t )1; i < count1; i = i + (uint32_t )1)
+ Hacl_Bignum_Fsquare_fsquare_(tmp, input);
+}
+
+/* output = input^(2^count1): copy input into output, then square it in
+   place count1 times using a zeroed wide scratch buffer. */
+inline static void
+Hacl_Bignum_Fsquare_fsquare_times(uint64_t *output, uint64_t *input, uint32_t count1)
+{
+ KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0), (uint32_t )5);
+ FStar_UInt128_t t[5];
+ for (uintmax_t _i = 0; _i < (uint32_t )5; ++_i)
+ t[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
+ memcpy(output, input, (uint32_t )5 * sizeof input[0]);
+ Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
+}
+
+/* In-place variant: output = output^(2^count1). */
+inline static void Hacl_Bignum_Fsquare_fsquare_times_inplace(uint64_t *output, uint32_t count1)
+{
+ KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0), (uint32_t )5);
+ FStar_UInt128_t t[5];
+ for (uintmax_t _i = 0; _i < (uint32_t )5; ++_i)
+ t[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
+ Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
+}
+
+/* Field inversion out = z^(-1) by Fermat's little theorem: a fixed
+   square-and-multiply addition chain (runs of 1, 2, 5, 10, 20, 10, 50,
+   100, 50, 5 squarings) raising z to p - 2 for p = 2^255 - 19.
+   All intermediates live in one 20-limb buffer carved into a/t0/b/c. */
+inline static void Hacl_Bignum_Crecip_crecip(uint64_t *out, uint64_t *z)
+{
+ uint64_t buf[20] = { 0 };
+ uint64_t *a = buf;
+ uint64_t *t00 = buf + (uint32_t )5;
+ uint64_t *b0 = buf + (uint32_t )10;
+ (void )(buf + (uint32_t )15);
+ Hacl_Bignum_Fsquare_fsquare_times(a, z, (uint32_t )1);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, a, (uint32_t )2);
+ Hacl_Bignum_Fmul_fmul(b0, t00, z);
+ Hacl_Bignum_Fmul_fmul(a, b0, a);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, a, (uint32_t )1);
+ Hacl_Bignum_Fmul_fmul(b0, t00, b0);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, b0, (uint32_t )5);
+ uint64_t *t01 = buf + (uint32_t )5;
+ uint64_t *b1 = buf + (uint32_t )10;
+ uint64_t *c0 = buf + (uint32_t )15;
+ Hacl_Bignum_Fmul_fmul(b1, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t )10);
+ Hacl_Bignum_Fmul_fmul(c0, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, c0, (uint32_t )20);
+ Hacl_Bignum_Fmul_fmul(t01, t01, c0);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t01, (uint32_t )10);
+ Hacl_Bignum_Fmul_fmul(b1, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t )50);
+ uint64_t *a0 = buf;
+ uint64_t *t0 = buf + (uint32_t )5;
+ uint64_t *b = buf + (uint32_t )10;
+ uint64_t *c = buf + (uint32_t )15;
+ Hacl_Bignum_Fmul_fmul(c, t0, b);
+ Hacl_Bignum_Fsquare_fsquare_times(t0, c, (uint32_t )100);
+ Hacl_Bignum_Fmul_fmul(t0, t0, c);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t )50);
+ Hacl_Bignum_Fmul_fmul(t0, t0, b);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t )5);
+ Hacl_Bignum_Fmul_fmul(out, t0, a0);
+}
+
+/* Variant of crecip with a different chain tail: it recomputes a0 = z^2
+   midway and finishes with 2 squarings (instead of 5) before the final
+   multiply.  NOTE(review): the exponent therefore differs from crecip's
+   z^(p-2) — presumably this is the power used for square-root extraction
+   in Ed25519 point decompression; confirm against the caller/spec. */
+inline static void Hacl_Bignum_Crecip_crecip_(uint64_t *out, uint64_t *z)
+{
+ uint64_t buf[20] = { 0 };
+ uint64_t *a = buf;
+ uint64_t *t00 = buf + (uint32_t )5;
+ uint64_t *b0 = buf + (uint32_t )10;
+ (void )(buf + (uint32_t )15);
+ Hacl_Bignum_Fsquare_fsquare_times(a, z, (uint32_t )1);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, a, (uint32_t )2);
+ Hacl_Bignum_Fmul_fmul(b0, t00, z);
+ Hacl_Bignum_Fmul_fmul(a, b0, a);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, a, (uint32_t )1);
+ Hacl_Bignum_Fmul_fmul(b0, t00, b0);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, b0, (uint32_t )5);
+ uint64_t *t01 = buf + (uint32_t )5;
+ uint64_t *b1 = buf + (uint32_t )10;
+ uint64_t *c0 = buf + (uint32_t )15;
+ Hacl_Bignum_Fmul_fmul(b1, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t )10);
+ Hacl_Bignum_Fmul_fmul(c0, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, c0, (uint32_t )20);
+ Hacl_Bignum_Fmul_fmul(t01, t01, c0);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t01, (uint32_t )10);
+ Hacl_Bignum_Fmul_fmul(b1, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t )50);
+ uint64_t *a0 = buf;
+ (void )(buf + (uint32_t )5);
+ (void )(buf + (uint32_t )10);
+ (void )(buf + (uint32_t )15);
+ Hacl_Bignum_Fsquare_fsquare_times(a0, z, (uint32_t )1);
+ uint64_t *a1 = buf;
+ uint64_t *t0 = buf + (uint32_t )5;
+ uint64_t *b = buf + (uint32_t )10;
+ uint64_t *c = buf + (uint32_t )15;
+ Hacl_Bignum_Fmul_fmul(c, t0, b);
+ Hacl_Bignum_Fsquare_fsquare_times(t0, c, (uint32_t )100);
+ Hacl_Bignum_Fmul_fmul(t0, t0, c);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t )50);
+ Hacl_Bignum_Fmul_fmul(t0, t0, b);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t )2);
+ Hacl_Bignum_Fmul_fmul(out, t0, a1);
+}
+
+/* Limbwise field addition a[i] += b[i], i = 0..4 (unrolled; no carry —
+   callers rely on limb headroom and reduce later). */
+inline static void Hacl_Bignum_fsum(uint64_t *a, uint64_t *b)
+{
+ {
+ uint64_t uu____871 = a[0];
+ uint64_t uu____874 = b[0];
+ uint64_t uu____870 = uu____871 + uu____874;
+ a[0] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[1];
+ uint64_t uu____874 = b[1];
+ uint64_t uu____870 = uu____871 + uu____874;
+ a[1] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[2];
+ uint64_t uu____874 = b[2];
+ uint64_t uu____870 = uu____871 + uu____874;
+ a[2] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[3];
+ uint64_t uu____874 = b[3];
+ uint64_t uu____870 = uu____871 + uu____874;
+ a[3] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[4];
+ uint64_t uu____874 = b[4];
+ uint64_t uu____870 = uu____871 + uu____874;
+ a[4] = uu____870;
+ }
+}
+
+/* Field subtraction a = b - a (note the operand order).  A multiple of
+   the prime is first added to a copy of b's limbs (0x3fffffffffff68 =
+   8*(2^51 - 19) for limb 0, 0x3ffffffffffff8 = 8*(2^51 - 1) for the rest)
+   so each limbwise subtraction cannot underflow. */
+inline static void Hacl_Bignum_fdifference(uint64_t *a, uint64_t *b)
+{
+ uint64_t tmp[5] = { 0 };
+ memcpy(tmp, b, (uint32_t )5 * sizeof b[0]);
+ uint64_t b0 = tmp[0];
+ uint64_t b1 = tmp[1];
+ uint64_t b2 = tmp[2];
+ uint64_t b3 = tmp[3];
+ uint64_t b4 = tmp[4];
+ tmp[0] = b0 + (uint64_t )0x3fffffffffff68;
+ tmp[1] = b1 + (uint64_t )0x3ffffffffffff8;
+ tmp[2] = b2 + (uint64_t )0x3ffffffffffff8;
+ tmp[3] = b3 + (uint64_t )0x3ffffffffffff8;
+ tmp[4] = b4 + (uint64_t )0x3ffffffffffff8;
+ {
+ uint64_t uu____871 = a[0];
+ uint64_t uu____874 = tmp[0];
+ uint64_t uu____870 = uu____874 - uu____871;
+ a[0] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[1];
+ uint64_t uu____874 = tmp[1];
+ uint64_t uu____870 = uu____874 - uu____871;
+ a[1] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[2];
+ uint64_t uu____874 = tmp[2];
+ uint64_t uu____870 = uu____874 - uu____871;
+ a[2] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[3];
+ uint64_t uu____874 = tmp[3];
+ uint64_t uu____870 = uu____874 - uu____871;
+ a[3] = uu____870;
+ }
+ {
+ uint64_t uu____871 = a[4];
+ uint64_t uu____874 = tmp[4];
+ uint64_t uu____870 = uu____874 - uu____871;
+ a[4] = uu____870;
+ }
+}
+
+/* Field multiplication wrapper: output = a * b. */
+inline static void Hacl_Bignum_fmul(uint64_t *output, uint64_t *a, uint64_t *b)
+{
+ Hacl_Bignum_Fmul_fmul(output, a, b);
+}
+
+/* Store five limbs into output[0..4] (Ed25519 copy of the same generated
+   helper; an identical trailing-underscore duplicate follows). */
+static void
+Hacl_EC_Format_upd_5(
+ uint64_t *output,
+ uint64_t output0,
+ uint64_t output1,
+ uint64_t output2,
+ uint64_t output3,
+ uint64_t output4
+)
+{
+ output[0] = output0;
+ output[1] = output1;
+ output[2] = output2;
+ output[3] = output3;
+ output[4] = output4;
+}
+
+/* Identical duplicate of Hacl_EC_Format_upd_5, kept as emitted by the
+   KreMLin extraction. */
+static void
+Hacl_EC_Format_upd_5_(
+ uint64_t *output,
+ uint64_t output0,
+ uint64_t output1,
+ uint64_t output2,
+ uint64_t output3,
+ uint64_t output4
+)
+{
+ output[0] = output0;
+ output[1] = output1;
+ output[2] = output2;
+ output[3] = output3;
+ output[4] = output4;
+}
+
+/* Unpack a 32-byte little-endian field element into 5 limbs of 51 bits
+   (mask_511 is 2^51 - 1; the name is a generated-code artifact).  Loads at
+   byte offsets 0, 6, 12, 19, 24 shifted by 0, 3, 6, 1, 12 bits place each
+   extraction at bit 51*i. */
+static void Hacl_EC_Format_fexpand(uint64_t *output, uint8_t *input)
+{
+ uint64_t mask_511 = (uint64_t )0x7ffffffffffff;
+ uint64_t i0 = load64_le(input);
+ uint8_t *x00 = input + (uint32_t )6;
+ uint64_t i1 = load64_le(x00);
+ uint8_t *x01 = input + (uint32_t )12;
+ uint64_t i2 = load64_le(x01);
+ uint8_t *x02 = input + (uint32_t )19;
+ uint64_t i3 = load64_le(x02);
+ uint8_t *x0 = input + (uint32_t )24;
+ uint64_t i4 = load64_le(x0);
+ uint64_t output0 = i0 & mask_511;
+ uint64_t output1 = i1 >> (uint32_t )3 & mask_511;
+ uint64_t output2 = i2 >> (uint32_t )6 & mask_511;
+ uint64_t output3 = i3 >> (uint32_t )1 & mask_511;
+ uint64_t output4 = i4 >> (uint32_t )12 & mask_511;
+ Hacl_EC_Format_upd_5(output, output0, output1, output2, output3, output4);
+}
+
+/* One carry pass over the 5 limbs: propagate bits above 51 upward, masking
+   limbs 0..3 to 51 bits; limb 4's overflow is handled by carry_top. */
+static void Hacl_EC_Format_fcontract_first_carry_pass(uint64_t *input)
+{
+ uint64_t t0 = input[0];
+ uint64_t t1 = input[1];
+ uint64_t t2 = input[2];
+ uint64_t t3 = input[3];
+ uint64_t t4 = input[4];
+ uint64_t t1_ = t1 + (t0 >> (uint32_t )51);
+ uint64_t t0_ = t0 & (uint64_t )0x7ffffffffffff;
+ uint64_t t2_ = t2 + (t1_ >> (uint32_t )51);
+ uint64_t t1__ = t1_ & (uint64_t )0x7ffffffffffff;
+ uint64_t t3_ = t3 + (t2_ >> (uint32_t )51);
+ uint64_t t2__ = t2_ & (uint64_t )0x7ffffffffffff;
+ uint64_t t4_ = t4 + (t3_ >> (uint32_t )51);
+ uint64_t t3__ = t3_ & (uint64_t )0x7ffffffffffff;
+ Hacl_EC_Format_upd_5_(input, t0_, t1__, t2__, t3__, t4_);
+}
+
+/* Carry pass followed by carry_top (folds limb 4's overflow, times 19,
+   back into limb 0). */
+static void Hacl_EC_Format_fcontract_first_carry_full(uint64_t *input)
+{
+ Hacl_EC_Format_fcontract_first_carry_pass(input);
+ Hacl_Bignum_Modulo_carry_top(input);
+}
+
+/* Second carry pass; structurally identical to the first (kept separate by
+   the generated code). */
+static void Hacl_EC_Format_fcontract_second_carry_pass(uint64_t *input)
+{
+ uint64_t t0 = input[0];
+ uint64_t t1 = input[1];
+ uint64_t t2 = input[2];
+ uint64_t t3 = input[3];
+ uint64_t t4 = input[4];
+ uint64_t t1_ = t1 + (t0 >> (uint32_t )51);
+ uint64_t t0_ = t0 & (uint64_t )0x7ffffffffffff;
+ uint64_t t2_ = t2 + (t1_ >> (uint32_t )51);
+ uint64_t t1__ = t1_ & (uint64_t )0x7ffffffffffff;
+ uint64_t t3_ = t3 + (t2_ >> (uint32_t )51);
+ uint64_t t2__ = t2_ & (uint64_t )0x7ffffffffffff;
+ uint64_t t4_ = t4 + (t3_ >> (uint32_t )51);
+ uint64_t t3__ = t3_ & (uint64_t )0x7ffffffffffff;
+ Hacl_EC_Format_upd_5_(input, t0_, t1__, t2__, t3__, t4_);
+}
+
+/* Second carry pass + carry_top, then one last carry from limb 0 into
+   limb 1 so limb 0 is strictly below 2^51. */
+static void Hacl_EC_Format_fcontract_second_carry_full(uint64_t *input)
+{
+ Hacl_EC_Format_fcontract_second_carry_pass(input);
+ Hacl_Bignum_Modulo_carry_top(input);
+ uint64_t i0 = input[0];
+ uint64_t i1 = input[1];
+ uint64_t i0_ = i0 & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t )51);
+ input[0] = i0_;
+ input[1] = i1_;
+}
+
+/* Branch-free canonicalization: subtract the prime p = 2^255 - 19 when the
+   value is >= p.  Limb constants are 2^51 - 19 (limb 0) and 2^51 - 1
+   (limbs 1..4); `mask` is all-ones only if every comparison holds. */
+static void Hacl_EC_Format_fcontract_trim(uint64_t *input)
+{
+ uint64_t a0 = input[0];
+ uint64_t a1 = input[1];
+ uint64_t a2 = input[2];
+ uint64_t a3 = input[3];
+ uint64_t a4 = input[4];
+ uint64_t mask0 = FStar_UInt64_gte_mask(a0, (uint64_t )0x7ffffffffffed);
+ uint64_t mask1 = FStar_UInt64_eq_mask(a1, (uint64_t )0x7ffffffffffff);
+ uint64_t mask2 = FStar_UInt64_eq_mask(a2, (uint64_t )0x7ffffffffffff);
+ uint64_t mask3 = FStar_UInt64_eq_mask(a3, (uint64_t )0x7ffffffffffff);
+ uint64_t mask4 = FStar_UInt64_eq_mask(a4, (uint64_t )0x7ffffffffffff);
+ uint64_t mask = mask0 & mask1 & mask2 & mask3 & mask4;
+ uint64_t a0_ = a0 - ((uint64_t )0x7ffffffffffed & mask);
+ uint64_t a1_ = a1 - ((uint64_t )0x7ffffffffffff & mask);
+ uint64_t a2_ = a2 - ((uint64_t )0x7ffffffffffff & mask);
+ uint64_t a3_ = a3 - ((uint64_t )0x7ffffffffffff & mask);
+ uint64_t a4_ = a4 - ((uint64_t )0x7ffffffffffff & mask);
+ Hacl_EC_Format_upd_5_(input, a0_, a1_, a2_, a3_, a4_);
+}
+
/* Fully reduce a field element in place to its unique representative
   < 2^255 - 19: two carry rounds followed by a constant-time trim. */
static void Hacl_EC_Format_reduce(uint64_t *out)
{
  Hacl_EC_Format_fcontract_first_carry_full(out);
  Hacl_EC_Format_fcontract_second_carry_full(out);
  Hacl_EC_Format_fcontract_trim(out);
}
+
/* a <- a + b, limb-wise (no carry propagation; caller tracks limb growth). */
static void Hacl_Bignum25519_fsum(uint64_t *a, uint64_t *b)
{
  Hacl_Bignum_fsum(a, b);
}
+
/* a <- b - a in the field (underlying routine adds a multiple of p first
   so limbs never underflow). */
static void Hacl_Bignum25519_fdifference(uint64_t *a, uint64_t *b)
{
  Hacl_Bignum_fdifference(a, b);
}
+
/* Partially reduce `a` in place so every limb fits well under 2^52:
   one carry pass, fold the top-limb overflow (times 19) into limb 0,
   then one last carry from limb 0 to limb 1.  Not a full canonical
   reduction -- see Hacl_Bignum25519_reduce for that. */
static void Hacl_Bignum25519_reduce_513(uint64_t *a)
{
  uint64_t t0 = a[0];
  uint64_t t1 = a[1];
  uint64_t t2 = a[2];
  uint64_t t3 = a[3];
  uint64_t t4 = a[4];
  uint64_t t1_ = t1 + (t0 >> (uint32_t )51);
  uint64_t t0_ = t0 & (uint64_t )0x7ffffffffffff;
  uint64_t t2_ = t2 + (t1_ >> (uint32_t )51);
  uint64_t t1__ = t1_ & (uint64_t )0x7ffffffffffff;
  uint64_t t3_ = t3 + (t2_ >> (uint32_t )51);
  uint64_t t2__ = t2_ & (uint64_t )0x7ffffffffffff;
  uint64_t t4_ = t4 + (t3_ >> (uint32_t )51);
  uint64_t t3__ = t3_ & (uint64_t )0x7ffffffffffff;
  Hacl_Lib_Create64_make_h64_5(a, t0_, t1__, t2__, t3__, t4_);
  Hacl_Bignum_Modulo_carry_top(a);
  uint64_t i0 = a[0];
  uint64_t i1 = a[1];
  uint64_t i0_ = i0 & ((uint64_t )1 << (uint32_t )51) - (uint64_t )1;
  uint64_t i1_ = i1 + (i0 >> (uint32_t )51);
  a[0] = i0_;
  a[1] = i1_;
}
+
/* a <- b - a, then partially reduce the result so it is safe as a
   multiplication input. */
static void Hacl_Bignum25519_fdifference_reduced(uint64_t *a, uint64_t *b)
{
  Hacl_Bignum25519_fdifference(a, b);
  Hacl_Bignum25519_reduce_513(a);
}
+
/* out <- a * b in GF(2^255 - 19); thin wrapper over the shared bignum core. */
static void Hacl_Bignum25519_fmul(uint64_t *out, uint64_t *a, uint64_t *b)
{
  Hacl_Bignum_fmul(out, a, b);
}
+
+static void Hacl_Bignum25519_times_2(uint64_t *out, uint64_t *a)
+{
+ uint64_t a0 = a[0];
+ uint64_t a1 = a[1];
+ uint64_t a2 = a[2];
+ uint64_t a3 = a[3];
+ uint64_t a4 = a[4];
+ uint64_t o0 = (uint64_t )2 * a0;
+ uint64_t o1 = (uint64_t )2 * a1;
+ uint64_t o2 = (uint64_t )2 * a2;
+ uint64_t o3 = (uint64_t )2 * a3;
+ uint64_t o4 = (uint64_t )2 * a4;
+ Hacl_Lib_Create64_make_h64_5(out, o0, o1, o2, o3, o4);
+}
+
/* out <- d * a, where d is the Edwards curve constant of Ed25519
   (-121665/121666 mod p), given here as 5x51-bit limbs. */
static void Hacl_Bignum25519_times_d(uint64_t *out, uint64_t *a)
{
  uint64_t d1[5] = { 0 };
  Hacl_Lib_Create64_make_h64_5(d1,
    (uint64_t )0x00034dca135978a3,
    (uint64_t )0x0001a8283b156ebd,
    (uint64_t )0x0005e7a26001c029,
    (uint64_t )0x000739c663a03cbb,
    (uint64_t )0x00052036cee2b6ff);
  Hacl_Bignum25519_fmul(out, d1, a);
}
+
/* out <- 2d * a, with 2d (twice the Edwards constant, mod p) precomputed
   as 5x51-bit limbs. */
static void Hacl_Bignum25519_times_2d(uint64_t *out, uint64_t *a)
{
  uint64_t d2[5] = { 0 };
  Hacl_Lib_Create64_make_h64_5(d2,
    (uint64_t )0x00069b9426b2f159,
    (uint64_t )0x00035050762add7a,
    (uint64_t )0x0003cf44c0038052,
    (uint64_t )0x0006738cc7407977,
    (uint64_t )0x0002406d9dc56dff);
  Hacl_Bignum25519_fmul(out, a, d2);
}
+
/* out <- a^2 in the field.  `a` is first copied into `out`, and the core
   squaring routine then works on `out` with `tmp` as 128-bit wide scratch
   (presumably squaring in place -- matches the upstream HACL* extraction). */
static void Hacl_Bignum25519_fsquare(uint64_t *out, uint64_t *a)
{
  KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0), (uint32_t )5);
  FStar_UInt128_t tmp[5];
  for (uintmax_t _i = 0; _i < (uint32_t )5; ++_i)
    tmp[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
  memcpy(out, a, (uint32_t )5 * sizeof a[0]);
  Hacl_Bignum_Fsquare_fsquare_(tmp, out);
}
+
/* out <- a^(-1) mod p via the shared exponentiation ladder (crecip). */
static void Hacl_Bignum25519_inverse(uint64_t *out, uint64_t *a)
{
  Hacl_Bignum_Crecip_crecip(out, a);
}
+
/* Fully reduce a field element in place to its canonical representative. */
static void Hacl_Bignum25519_reduce(uint64_t *out)
{
  Hacl_EC_Format_reduce(out);
}
+
+static uint64_t *Hacl_Impl_Ed25519_ExtPoint_getx(uint64_t *p)
+{
+ return p;
+}
+
+static uint64_t *Hacl_Impl_Ed25519_ExtPoint_gety(uint64_t *p)
+{
+ return p + (uint32_t )5;
+}
+
+static uint64_t *Hacl_Impl_Ed25519_ExtPoint_getz(uint64_t *p)
+{
+ return p + (uint32_t )10;
+}
+
+static uint64_t *Hacl_Impl_Ed25519_ExtPoint_gett(uint64_t *p)
+{
+ return p + (uint32_t )15;
+}
+
/* Write the Ed25519 base point into g1 in extended coordinates
   (X, Y, Z = 1, T = X*Y), each coordinate as 5x51-bit limbs. */
static void Hacl_Impl_Ed25519_G_make_g(uint64_t *g1)
{
  uint64_t *gx = Hacl_Impl_Ed25519_ExtPoint_getx(g1);
  uint64_t *gy = Hacl_Impl_Ed25519_ExtPoint_gety(g1);
  uint64_t *gz = Hacl_Impl_Ed25519_ExtPoint_getz(g1);
  uint64_t *gt1 = Hacl_Impl_Ed25519_ExtPoint_gett(g1);
  Hacl_Lib_Create64_make_h64_5(gx,
    (uint64_t )0x00062d608f25d51a,
    (uint64_t )0x000412a4b4f6592a,
    (uint64_t )0x00075b7171a4b31d,
    (uint64_t )0x0001ff60527118fe,
    (uint64_t )0x000216936d3cd6e5);
  Hacl_Lib_Create64_make_h64_5(gy,
    (uint64_t )0x0006666666666658,
    (uint64_t )0x0004cccccccccccc,
    (uint64_t )0x0001999999999999,
    (uint64_t )0x0003333333333333,
    (uint64_t )0x0006666666666666);
  Hacl_Lib_Create64_make_h64_5(gz,
    (uint64_t )0x0000000000000001,
    (uint64_t )0x0000000000000000,
    (uint64_t )0x0000000000000000,
    (uint64_t )0x0000000000000000,
    (uint64_t )0x0000000000000000);
  Hacl_Lib_Create64_make_h64_5(gt1,
    (uint64_t )0x00068ab3a5b7dda3,
    (uint64_t )0x00000eea2a5eadbb,
    (uint64_t )0x0002af8df483c27e,
    (uint64_t )0x000332b375274732,
    (uint64_t )0x00067875f0fd78b7);
}
+
/* Serialize a fully-reduced field element (5x51-bit limbs, 255 bits total)
   into 32 little-endian bytes: the limbs are repacked into four 64-bit
   words by shifting/or-ing adjacent limbs together. */
static void Hacl_Impl_Store51_store_51_(uint8_t *output, uint64_t *input)
{
  uint64_t t0 = input[0];
  uint64_t t1 = input[1];
  uint64_t t2 = input[2];
  uint64_t t3 = input[3];
  uint64_t t4 = input[4];
  uint64_t o0 = t1 << (uint32_t )51 | t0;              /* bits 0..63 */
  uint64_t o1 = t2 << (uint32_t )38 | t1 >> (uint32_t )13; /* bits 64..127 */
  uint64_t o2 = t3 << (uint32_t )25 | t2 >> (uint32_t )26;
  uint64_t o3 = t4 << (uint32_t )12 | t3 >> (uint32_t )39;
  uint8_t *b0 = output;
  uint8_t *b1 = output + (uint32_t )8;
  uint8_t *b2 = output + (uint32_t )16;
  uint8_t *b3 = output + (uint32_t )24;
  store64_le(b0, o0);
  store64_le(b1, o1);
  store64_le(b2, o2);
  store64_le(b3, o3);
}
+
+static uint64_t Hacl_Impl_Ed25519_PointCompress_x_mod_2(uint64_t *x)
+{
+ uint64_t x0 = x[0];
+ return x0 & (uint64_t )1;
+}
+
/* Compress an extended point into the 32-byte Ed25519 wire format:
   z <- little-endian encoding of y = Y/Z, with the parity of x = X/Z
   stored in the top bit of the last byte.  Note x0/out0 deliberately
   alias x/out (scratch layout from the F* extraction). */
static void Hacl_Impl_Ed25519_PointCompress_point_compress(uint8_t *z, uint64_t *p)
{
  uint64_t tmp[15] = { 0 };
  uint64_t *x0 = tmp + (uint32_t )5;   /* alias of x */
  uint64_t *out0 = tmp + (uint32_t )10; /* alias of out */
  uint64_t *zinv = tmp;
  uint64_t *x = tmp + (uint32_t )5;
  uint64_t *out = tmp + (uint32_t )10;
  uint64_t *px = Hacl_Impl_Ed25519_ExtPoint_getx(p);
  uint64_t *py = Hacl_Impl_Ed25519_ExtPoint_gety(p);
  uint64_t *pz = Hacl_Impl_Ed25519_ExtPoint_getz(p);
  Hacl_Bignum25519_inverse(zinv, pz);      /* zinv = 1/Z */
  Hacl_Bignum25519_fmul(x, px, zinv);      /* x = X/Z, canonical */
  Hacl_Bignum25519_reduce(x);
  Hacl_Bignum25519_fmul(out, py, zinv);    /* out = Y/Z, canonical */
  Hacl_Bignum25519_reduce(out);
  uint64_t b = Hacl_Impl_Ed25519_PointCompress_x_mod_2(x0);
  Hacl_Impl_Store51_store_51_(z, out0);
  uint8_t xbyte = (uint8_t )b;
  uint8_t o31 = z[31];
  z[31] = o31 + (xbyte << (uint32_t )7); /* sign bit of x into bit 255 */
}
+
/* Constant-time conditional swap of two 5-limb field elements:
   (a_, b_) <- (b, a) when swap1 is all-ones, (a, b) when swap1 is zero.
   Implemented with XOR masking so no branch depends on the secret bit. */
static void
Hacl_Impl_Ed25519_SwapConditional_swap_conditional_step(
  uint64_t *a_,
  uint64_t *b_,
  uint64_t *a,
  uint64_t *b,
  uint64_t swap1
)
{
  uint64_t a0 = a[0];
  uint64_t a1 = a[1];
  uint64_t a2 = a[2];
  uint64_t a3 = a[3];
  uint64_t a4 = a[4];
  uint64_t b0 = b[0];
  uint64_t b1 = b[1];
  uint64_t b2 = b[2];
  uint64_t b3 = b[3];
  uint64_t b4 = b[4];
  uint64_t x0 = swap1 & (a0 ^ b0); /* zero, or the limb difference */
  uint64_t x1 = swap1 & (a1 ^ b1);
  uint64_t x2 = swap1 & (a2 ^ b2);
  uint64_t x3 = swap1 & (a3 ^ b3);
  uint64_t x4 = swap1 & (a4 ^ b4);
  uint64_t a0_ = a0 ^ x0;
  uint64_t b0_ = b0 ^ x0;
  uint64_t a1_ = a1 ^ x1;
  uint64_t b1_ = b1 ^ x1;
  uint64_t a2_ = a2 ^ x2;
  uint64_t b2_ = b2 ^ x2;
  uint64_t a3_ = a3 ^ x3;
  uint64_t b3_ = b3 ^ x3;
  uint64_t a4_ = a4 ^ x4;
  uint64_t b4_ = b4 ^ x4;
  Hacl_Lib_Create64_make_h64_5(a_, a0_, a1_, a2_, a3_, a4_);
  Hacl_Lib_Create64_make_h64_5(b_, b0_, b1_, b2_, b3_, b4_);
}
+
/* Constant-time conditional swap of two extended points:
   (a_, b_) <- swapped or unswapped copies of (a, b) depending on iswap
   (0 or 1); the bit is expanded to a full mask before use. */
static void
Hacl_Impl_Ed25519_SwapConditional_swap_conditional(
  uint64_t *a_,
  uint64_t *b_,
  uint64_t *a,
  uint64_t *b,
  uint64_t iswap
)
{
  uint64_t swap1 = (uint64_t )0 - iswap; /* 0 -> 0, 1 -> all-ones */
  Hacl_Impl_Ed25519_SwapConditional_swap_conditional_step(Hacl_Impl_Ed25519_ExtPoint_getx(a_),
    Hacl_Impl_Ed25519_ExtPoint_getx(b_),
    Hacl_Impl_Ed25519_ExtPoint_getx(a),
    Hacl_Impl_Ed25519_ExtPoint_getx(b),
    swap1);
  Hacl_Impl_Ed25519_SwapConditional_swap_conditional_step(Hacl_Impl_Ed25519_ExtPoint_gety(a_),
    Hacl_Impl_Ed25519_ExtPoint_gety(b_),
    Hacl_Impl_Ed25519_ExtPoint_gety(a),
    Hacl_Impl_Ed25519_ExtPoint_gety(b),
    swap1);
  Hacl_Impl_Ed25519_SwapConditional_swap_conditional_step(Hacl_Impl_Ed25519_ExtPoint_getz(a_),
    Hacl_Impl_Ed25519_ExtPoint_getz(b_),
    Hacl_Impl_Ed25519_ExtPoint_getz(a),
    Hacl_Impl_Ed25519_ExtPoint_getz(b),
    swap1);
  Hacl_Impl_Ed25519_SwapConditional_swap_conditional_step(Hacl_Impl_Ed25519_ExtPoint_gett(a_),
    Hacl_Impl_Ed25519_ExtPoint_gett(b_),
    Hacl_Impl_Ed25519_ExtPoint_gett(a),
    Hacl_Impl_Ed25519_ExtPoint_gett(b),
    swap1);
}
+
/* In-place variant: conditionally swap the extended points a and b
   (iswap is 0 or 1), still branch-free on the secret bit. */
static void
Hacl_Impl_Ed25519_SwapConditional_swap_conditional_inplace(
  uint64_t *a,
  uint64_t *b,
  uint64_t iswap
)
{
  uint64_t swap1 = (uint64_t )0 - iswap; /* expand bit to full mask */
  Hacl_Impl_Ed25519_SwapConditional_swap_conditional_step(Hacl_Impl_Ed25519_ExtPoint_getx(a),
    Hacl_Impl_Ed25519_ExtPoint_getx(b),
    Hacl_Impl_Ed25519_ExtPoint_getx(a),
    Hacl_Impl_Ed25519_ExtPoint_getx(b),
    swap1);
  Hacl_Impl_Ed25519_SwapConditional_swap_conditional_step(Hacl_Impl_Ed25519_ExtPoint_gety(a),
    Hacl_Impl_Ed25519_ExtPoint_gety(b),
    Hacl_Impl_Ed25519_ExtPoint_gety(a),
    Hacl_Impl_Ed25519_ExtPoint_gety(b),
    swap1);
  Hacl_Impl_Ed25519_SwapConditional_swap_conditional_step(Hacl_Impl_Ed25519_ExtPoint_getz(a),
    Hacl_Impl_Ed25519_ExtPoint_getz(b),
    Hacl_Impl_Ed25519_ExtPoint_getz(a),
    Hacl_Impl_Ed25519_ExtPoint_getz(b),
    swap1);
  Hacl_Impl_Ed25519_SwapConditional_swap_conditional_step(Hacl_Impl_Ed25519_ExtPoint_gett(a),
    Hacl_Impl_Ed25519_ExtPoint_gett(b),
    Hacl_Impl_Ed25519_ExtPoint_gett(a),
    Hacl_Impl_Ed25519_ExtPoint_gett(b),
    swap1);
}
+
+static void Hacl_Impl_Ed25519_SwapConditional_copy(uint64_t *output, uint64_t *input)
+{
+ memcpy(output, input, (uint32_t )20 * sizeof input[0]);
+}
+
/* Unified Edwards point addition out <- p + q1 on extended coordinates
   (Hisil-Wong-Carter-Dawson formulas), using a 30-limb scratch buffer
   whose six 5-limb slots are re-derived under different names at each
   stage (tmp1/tmp10/tmp11/... all alias the same storage).  The bare
   `(void)` casts are residue of the F* extraction and have no effect.
   Safe when out aliases p or q1, since inputs are consumed into tmp first. */
static void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q1)
{
  uint64_t tmp[30] = { 0 };
  uint64_t *tmp1 = tmp;
  uint64_t *tmp20 = tmp + (uint32_t )5;
  uint64_t *tmp30 = tmp + (uint32_t )10;
  uint64_t *tmp40 = tmp + (uint32_t )15;
  (void )(tmp + (uint32_t )20);
  (void )(tmp + (uint32_t )25);
  uint64_t *x1 = Hacl_Impl_Ed25519_ExtPoint_getx(p);
  uint64_t *y1 = Hacl_Impl_Ed25519_ExtPoint_gety(p);
  (void )Hacl_Impl_Ed25519_ExtPoint_getz(p);
  (void )Hacl_Impl_Ed25519_ExtPoint_gett(p);
  uint64_t *x2 = Hacl_Impl_Ed25519_ExtPoint_getx(q1);
  uint64_t *y2 = Hacl_Impl_Ed25519_ExtPoint_gety(q1);
  (void )Hacl_Impl_Ed25519_ExtPoint_getz(q1);
  (void )Hacl_Impl_Ed25519_ExtPoint_gett(q1);
  /* Stage 1: A = (y1-x1)*(y2-x2), B = (y1+x1)*(y2+x2) */
  memcpy(tmp1, x1, (uint32_t )5 * sizeof x1[0]);
  memcpy(tmp20, x2, (uint32_t )5 * sizeof x2[0]);
  Hacl_Bignum25519_fdifference_reduced(tmp1, y1);
  Hacl_Bignum25519_fdifference(tmp20, y2);
  Hacl_Bignum25519_fmul(tmp30, tmp1, tmp20);
  memcpy(tmp1, y1, (uint32_t )5 * sizeof y1[0]);
  memcpy(tmp20, y2, (uint32_t )5 * sizeof y2[0]);
  Hacl_Bignum25519_fsum(tmp1, x1);
  Hacl_Bignum25519_fsum(tmp20, x2);
  Hacl_Bignum25519_fmul(tmp40, tmp1, tmp20);
  uint64_t *tmp10 = tmp;
  uint64_t *tmp21 = tmp + (uint32_t )5;
  uint64_t *tmp31 = tmp + (uint32_t )10;
  (void )(tmp + (uint32_t )15);
  uint64_t *tmp50 = tmp + (uint32_t )20;
  uint64_t *tmp60 = tmp + (uint32_t )25;
  (void )Hacl_Impl_Ed25519_ExtPoint_getx(p);
  (void )Hacl_Impl_Ed25519_ExtPoint_gety(p);
  uint64_t *z1 = Hacl_Impl_Ed25519_ExtPoint_getz(p);
  uint64_t *t1 = Hacl_Impl_Ed25519_ExtPoint_gett(p);
  (void )Hacl_Impl_Ed25519_ExtPoint_getx(q1);
  (void )Hacl_Impl_Ed25519_ExtPoint_gety(q1);
  uint64_t *z2 = Hacl_Impl_Ed25519_ExtPoint_getz(q1);
  uint64_t *t2 = Hacl_Impl_Ed25519_ExtPoint_gett(q1);
  /* Stage 2: C = 2d*t1*t2, D = 2*z1*z2 */
  Hacl_Bignum25519_times_2d(tmp10, t1);
  Hacl_Bignum25519_fmul(tmp21, tmp10, t2);
  Hacl_Bignum25519_times_2(tmp10, z1);
  Hacl_Bignum25519_fmul(tmp50, tmp10, z2);
  memcpy(tmp10, tmp31, (uint32_t )5 * sizeof tmp31[0]);
  memcpy(tmp60, tmp21, (uint32_t )5 * sizeof tmp21[0]);
  uint64_t *tmp11 = tmp;
  uint64_t *tmp2 = tmp + (uint32_t )5;
  uint64_t *tmp3 = tmp + (uint32_t )10;
  uint64_t *tmp41 = tmp + (uint32_t )15;
  uint64_t *tmp51 = tmp + (uint32_t )20;
  uint64_t *tmp61 = tmp + (uint32_t )25;
  (void )Hacl_Impl_Ed25519_ExtPoint_getx(p);
  (void )Hacl_Impl_Ed25519_ExtPoint_gety(p);
  (void )Hacl_Impl_Ed25519_ExtPoint_getz(p);
  (void )Hacl_Impl_Ed25519_ExtPoint_gett(p);
  (void )Hacl_Impl_Ed25519_ExtPoint_getx(q1);
  (void )Hacl_Impl_Ed25519_ExtPoint_gety(q1);
  (void )Hacl_Impl_Ed25519_ExtPoint_getz(q1);
  (void )Hacl_Impl_Ed25519_ExtPoint_gett(q1);
  /* Stage 3: E = B-A, F = D-C, G = D+C, H = B+A */
  Hacl_Bignum25519_fdifference_reduced(tmp11, tmp41);
  Hacl_Bignum25519_fdifference(tmp61, tmp51);
  Hacl_Bignum25519_fsum(tmp51, tmp2);
  Hacl_Bignum25519_fsum(tmp41, tmp3);
  uint64_t *tmp12 = tmp;
  (void )(tmp + (uint32_t )5);
  (void )(tmp + (uint32_t )10);
  uint64_t *tmp4 = tmp + (uint32_t )15;
  uint64_t *tmp5 = tmp + (uint32_t )20;
  uint64_t *tmp6 = tmp + (uint32_t )25;
  uint64_t *x3 = Hacl_Impl_Ed25519_ExtPoint_getx(out);
  uint64_t *y3 = Hacl_Impl_Ed25519_ExtPoint_gety(out);
  uint64_t *z3 = Hacl_Impl_Ed25519_ExtPoint_getz(out);
  uint64_t *t3 = Hacl_Impl_Ed25519_ExtPoint_gett(out);
  /* Stage 4: X3 = E*F, Y3 = G*H, T3 = E*H, Z3 = F*G */
  Hacl_Bignum25519_fmul(x3, tmp12, tmp6);
  Hacl_Bignum25519_fmul(y3, tmp5, tmp4);
  Hacl_Bignum25519_fmul(t3, tmp12, tmp4);
  Hacl_Bignum25519_fmul(z3, tmp5, tmp6);
}
+
/* Point-doubling, first half: compute the squares
   tmp1 = X^2, tmp2 = Y^2, tmp4 = 2*Z^2, and tmp3 = X^2 + Y^2 (reduced). */
static void Hacl_Impl_Ed25519_PointDouble_point_double_step_1(uint64_t *p, uint64_t *tmp)
{
  uint64_t *tmp1 = tmp;
  uint64_t *tmp2 = tmp + (uint32_t )5;
  uint64_t *tmp3 = tmp + (uint32_t )10;
  uint64_t *tmp4 = tmp + (uint32_t )15;
  (void )(tmp + (uint32_t )20);
  (void )(tmp + (uint32_t )25);
  uint64_t *x1 = Hacl_Impl_Ed25519_ExtPoint_getx(p);
  uint64_t *y1 = Hacl_Impl_Ed25519_ExtPoint_gety(p);
  uint64_t *z1 = Hacl_Impl_Ed25519_ExtPoint_getz(p);
  (void )Hacl_Impl_Ed25519_ExtPoint_gett(p);
  Hacl_Bignum25519_fsquare(tmp1, x1);
  Hacl_Bignum25519_fsquare(tmp2, y1);
  Hacl_Bignum25519_fsquare(tmp3, z1);
  Hacl_Bignum25519_times_2(tmp4, tmp3);
  memcpy(tmp3, tmp1, (uint32_t )5 * sizeof tmp1[0]);
  Hacl_Bignum25519_fsum(tmp3, tmp2);
  Hacl_Bignum25519_reduce_513(tmp3);
}
+
/* Point-doubling, second half: from the squares left in tmp by step 1,
   derive tmp6 = (X+Y)^2 - (X^2+Y^2), tmp2 = Y^2 - X^2, and
   tmp4 = 2Z^2 + (Y^2 - X^2), ready for the final multiplications. */
static void Hacl_Impl_Ed25519_PointDouble_point_double_step_2(uint64_t *p, uint64_t *tmp)
{
  uint64_t *tmp1 = tmp;
  uint64_t *tmp2 = tmp + (uint32_t )5;
  uint64_t *tmp3 = tmp + (uint32_t )10;
  uint64_t *tmp4 = tmp + (uint32_t )15;
  uint64_t *tmp5 = tmp + (uint32_t )20;
  uint64_t *tmp6 = tmp + (uint32_t )25;
  uint64_t *x1 = Hacl_Impl_Ed25519_ExtPoint_getx(p);
  uint64_t *y1 = Hacl_Impl_Ed25519_ExtPoint_gety(p);
  (void )Hacl_Impl_Ed25519_ExtPoint_getz(p);
  (void )Hacl_Impl_Ed25519_ExtPoint_gett(p);
  memcpy(tmp5, x1, (uint32_t )5 * sizeof x1[0]);
  Hacl_Bignum25519_fsum(tmp5, y1);
  Hacl_Bignum25519_fsquare(tmp6, tmp5);
  memcpy(tmp5, tmp3, (uint32_t )5 * sizeof tmp3[0]);
  Hacl_Bignum25519_fdifference(tmp6, tmp5);
  Hacl_Bignum25519_fdifference_reduced(tmp2, tmp1);
  Hacl_Bignum25519_reduce_513(tmp4);
  Hacl_Bignum25519_fsum(tmp4, tmp2);
}
+
/* Complete doubling out <- 2*p: run both halves over the shared 30-limb
   scratch, then combine the intermediates into the output coordinates. */
static void
Hacl_Impl_Ed25519_PointDouble_point_double_(uint64_t *out, uint64_t *p, uint64_t *tmp)
{
  uint64_t *tmp2 = tmp + (uint32_t )5;
  uint64_t *tmp3 = tmp + (uint32_t )10;
  uint64_t *tmp4 = tmp + (uint32_t )15;
  (void )(tmp + (uint32_t )20);
  uint64_t *tmp6 = tmp + (uint32_t )25;
  uint64_t *x3 = Hacl_Impl_Ed25519_ExtPoint_getx(out);
  uint64_t *y3 = Hacl_Impl_Ed25519_ExtPoint_gety(out);
  uint64_t *z3 = Hacl_Impl_Ed25519_ExtPoint_getz(out);
  uint64_t *t3 = Hacl_Impl_Ed25519_ExtPoint_gett(out);
  Hacl_Impl_Ed25519_PointDouble_point_double_step_1(p, tmp);
  Hacl_Impl_Ed25519_PointDouble_point_double_step_2(p, tmp);
  Hacl_Bignum25519_fmul(x3, tmp4, tmp6);
  Hacl_Bignum25519_fmul(y3, tmp2, tmp3);
  Hacl_Bignum25519_fmul(t3, tmp3, tmp6);
  Hacl_Bignum25519_fmul(z3, tmp4, tmp2);
}
+
/* out <- 2*p, allocating the 30-limb scratch buffer on the stack. */
static void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
{
  uint64_t tmp[30] = { 0 };
  Hacl_Impl_Ed25519_PointDouble_point_double_(out, p, tmp);
}
+
+static uint8_t Hacl_Impl_Ed25519_Ladder_Step_ith_bit(uint8_t *k1, uint32_t i)
+{
+ uint32_t q1 = i >> (uint32_t )3;
+ uint32_t r = i & (uint32_t )7;
+ uint8_t kq = k1[q1];
+ return kq >> r & (uint8_t )1;
+}
+
/* Thin wrapper: constant-time in-place conditional swap of two points. */
static void
Hacl_Impl_Ed25519_Ladder_Step_swap_cond_inplace(uint64_t *p, uint64_t *q1, uint64_t iswap)
{
  Hacl_Impl_Ed25519_SwapConditional_swap_conditional_inplace(p, q1, iswap);
}
+
/* Thin wrapper: constant-time out-of-place conditional swap of two points. */
static void
Hacl_Impl_Ed25519_Ladder_Step_swap_cond(
  uint64_t *p_,
  uint64_t *q_,
  uint64_t *p,
  uint64_t *q1,
  uint64_t iswap
)
{
  Hacl_Impl_Ed25519_SwapConditional_swap_conditional(p_, q_, p, q1, iswap);
}
+
/* Ladder step, phase 1: conditionally swap nq and nqpq (the working buffer
   b holds four points of 20 limbs each) according to the current scalar
   bit i.  k1 and ctr are unused here; they are kept by the F* extraction. */
static void
Hacl_Impl_Ed25519_Ladder_Step_loop_step_1(uint64_t *b, uint8_t *k1, uint32_t ctr, uint8_t i)
{
  uint64_t *nq = b;
  uint64_t *nqpq = b + (uint32_t )20;
  (void )(b + (uint32_t )40);
  (void )(b + (uint32_t )60);
  uint64_t bit = (uint64_t )i;
  Hacl_Impl_Ed25519_Ladder_Step_swap_cond_inplace(nq, nqpq, bit);
}
+
/* Ladder step, phase 2: nq2 <- 2*nq and nqpq2 <- nq + nqpq.
   k1 and ctr are unused (extraction residue), as are the (void) casts. */
static void Hacl_Impl_Ed25519_Ladder_Step_loop_step_2(uint64_t *b, uint8_t *k1, uint32_t ctr)
{
  uint64_t *nq = b;
  uint64_t *nqpq = b + (uint32_t )20;
  uint64_t *nq2 = b + (uint32_t )40;
  uint64_t *nqpq2 = b + (uint32_t )60;
  Hacl_Impl_Ed25519_PointDouble_point_double(nq2, nq);
  (void )Hacl_Impl_Ed25519_ExtPoint_getx(nq);
  (void )Hacl_Impl_Ed25519_ExtPoint_gety(nq);
  (void )Hacl_Impl_Ed25519_ExtPoint_getz(nq);
  (void )Hacl_Impl_Ed25519_ExtPoint_gett(nq);
  (void )Hacl_Impl_Ed25519_ExtPoint_getx(nqpq);
  (void )Hacl_Impl_Ed25519_ExtPoint_gety(nqpq);
  (void )Hacl_Impl_Ed25519_ExtPoint_getz(nqpq);
  (void )Hacl_Impl_Ed25519_ExtPoint_gett(nqpq);
  Hacl_Impl_Ed25519_PointAdd_point_add(nqpq2, nq, nqpq);
}
+
/* Ladder step, phase 3: write (nq2, nqpq2) back into (nq, nqpq),
   conditionally swapped on the same scalar bit i as phase 1. */
static void
Hacl_Impl_Ed25519_Ladder_Step_loop_step_3(uint64_t *b, uint8_t *k1, uint32_t ctr, uint8_t i)
{
  uint64_t *nq = b;
  uint64_t *nqpq = b + (uint32_t )20;
  uint64_t *nq2 = b + (uint32_t )40;
  uint64_t *nqpq2 = b + (uint32_t )60;
  uint64_t bit = (uint64_t )i;
  Hacl_Impl_Ed25519_Ladder_Step_swap_cond(nq, nqpq, nq2, nqpq2, bit);
}
+
/* One full Montgomery-ladder-style step for scalar bit `ctr`:
   swap on the bit, double-and-add, swap back. */
static void Hacl_Impl_Ed25519_Ladder_Step_loop_step(uint64_t *b, uint8_t *k1, uint32_t ctr)
{
  (void )(b + (uint32_t )20);
  (void )(b + (uint32_t )40);
  (void )(b + (uint32_t )60);
  uint8_t bit = Hacl_Impl_Ed25519_Ladder_Step_ith_bit(k1, ctr);
  Hacl_Impl_Ed25519_Ladder_Step_loop_step_1(b, k1, ctr, bit);
  Hacl_Impl_Ed25519_Ladder_Step_loop_step_2(b, k1, ctr);
  Hacl_Impl_Ed25519_Ladder_Step_loop_step_3(b, k1, ctr, bit);
}
+
/* Run the ladder over all 256 scalar bits, most-significant bit first. */
static void Hacl_Impl_Ed25519_Ladder_point_mul_(uint64_t *b, uint8_t *k1)
{
  (void )(b + (uint32_t )20);
  for (uint32_t i = (uint32_t )0; i < (uint32_t )256; i = i + (uint32_t )1)
  {
    (void )(b + (uint32_t )20);
    Hacl_Impl_Ed25519_Ladder_Step_loop_step(b, k1, (uint32_t )256 - i - (uint32_t )1);
  }
}
+
/* Write the identity element of the Edwards group into b in extended
   coordinates: (X, Y, Z, T) = (0, 1, 1, 0). */
static void Hacl_Impl_Ed25519_Ladder_make_point_inf(uint64_t *b)
{
  uint64_t *x = b;
  uint64_t *y = b + (uint32_t )5;
  uint64_t *z = b + (uint32_t )10;
  uint64_t *t = b + (uint32_t )15;
  uint64_t zero1 = (uint64_t )0;
  Hacl_Lib_Create64_make_h64_5(x, zero1, zero1, zero1, zero1, zero1);
  uint64_t zero10 = (uint64_t )0;
  uint64_t one10 = (uint64_t )1;
  Hacl_Lib_Create64_make_h64_5(y, one10, zero10, zero10, zero10, zero10);
  uint64_t zero11 = (uint64_t )0;
  uint64_t one1 = (uint64_t )1;
  Hacl_Lib_Create64_make_h64_5(z, one1, zero11, zero11, zero11, zero11);
  uint64_t zero12 = (uint64_t )0;
  Hacl_Lib_Create64_make_h64_5(t, zero12, zero12, zero12, zero12, zero12);
}
+
/* result <- scalar * q1 (Ed25519 scalar multiplication).  The 80-limb
   buffer b holds four points: nq starts at the identity, nqpq at q1. */
static void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *result, uint8_t *scalar, uint64_t *q1)
{
  uint64_t b[80] = { 0 };
  uint64_t *nq = b;
  uint64_t *nqpq = b + (uint32_t )20;
  Hacl_Impl_Ed25519_Ladder_make_point_inf(nq);
  Hacl_Impl_Ed25519_SwapConditional_copy(nqpq, q1);
  Hacl_Impl_Ed25519_Ladder_point_mul_(b, scalar);
  Hacl_Impl_Ed25519_SwapConditional_copy(result, nq);
}
+
+static void
+Hacl_Hash_Lib_LoadStore_uint64s_from_be_bytes(uint64_t *output, uint8_t *input, uint32_t len1)
+{
+ for (uint32_t i = (uint32_t )0; i < len1; i = i + (uint32_t )1)
+ {
+ uint8_t *x0 = input + (uint32_t )8 * i;
+ uint64_t inputi = load64_be(x0);
+ output[i] = inputi;
+ }
+}
+
+static void
+Hacl_Hash_Lib_LoadStore_uint64s_to_be_bytes(uint8_t *output, uint64_t *input, uint32_t len1)
+{
+ for (uint32_t i = (uint32_t )0; i < len1; i = i + (uint32_t )1)
+ {
+ uint64_t hd1 = input[i];
+ uint8_t *x0 = output + (uint32_t )8 * i;
+ store64_be(x0, hd1);
+ }
+}
+
/* Initialize the 169-word SHA-512 state:
     state[0..79]    k       -- the 80 round constants K (FIPS 180-4)
     state[80..159]  ws      -- message schedule (left untouched here)
     state[160..167] hash    -- initial hash value H(0)
     state[168]      counter -- number of blocks processed
   The cascade of p1x/p2x pointers is the binary-split fill pattern produced
   by the F* extraction; it simply writes the constants four at a time. */
static void Hacl_Hash_SHA2_512_init(uint64_t *state)
{
  (void )(state + (uint32_t )168);
  uint64_t *k1 = state;
  uint64_t *h_01 = state + (uint32_t )160;
  uint64_t *p10 = k1;
  uint64_t *p20 = k1 + (uint32_t )16;
  uint64_t *p3 = k1 + (uint32_t )32;
  uint64_t *p4 = k1 + (uint32_t )48;
  uint64_t *p5 = k1 + (uint32_t )64;
  uint64_t *p11 = p10;
  uint64_t *p21 = p10 + (uint32_t )8;
  uint64_t *p12 = p11;
  uint64_t *p22 = p11 + (uint32_t )4;
  /* Round constants K[0..79]. */
  p12[0] = (uint64_t )0x428a2f98d728ae22;
  p12[1] = (uint64_t )0x7137449123ef65cd;
  p12[2] = (uint64_t )0xb5c0fbcfec4d3b2f;
  p12[3] = (uint64_t )0xe9b5dba58189dbbc;
  p22[0] = (uint64_t )0x3956c25bf348b538;
  p22[1] = (uint64_t )0x59f111f1b605d019;
  p22[2] = (uint64_t )0x923f82a4af194f9b;
  p22[3] = (uint64_t )0xab1c5ed5da6d8118;
  uint64_t *p13 = p21;
  uint64_t *p23 = p21 + (uint32_t )4;
  p13[0] = (uint64_t )0xd807aa98a3030242;
  p13[1] = (uint64_t )0x12835b0145706fbe;
  p13[2] = (uint64_t )0x243185be4ee4b28c;
  p13[3] = (uint64_t )0x550c7dc3d5ffb4e2;
  p23[0] = (uint64_t )0x72be5d74f27b896f;
  p23[1] = (uint64_t )0x80deb1fe3b1696b1;
  p23[2] = (uint64_t )0x9bdc06a725c71235;
  p23[3] = (uint64_t )0xc19bf174cf692694;
  uint64_t *p14 = p20;
  uint64_t *p24 = p20 + (uint32_t )8;
  uint64_t *p15 = p14;
  uint64_t *p25 = p14 + (uint32_t )4;
  p15[0] = (uint64_t )0xe49b69c19ef14ad2;
  p15[1] = (uint64_t )0xefbe4786384f25e3;
  p15[2] = (uint64_t )0x0fc19dc68b8cd5b5;
  p15[3] = (uint64_t )0x240ca1cc77ac9c65;
  p25[0] = (uint64_t )0x2de92c6f592b0275;
  p25[1] = (uint64_t )0x4a7484aa6ea6e483;
  p25[2] = (uint64_t )0x5cb0a9dcbd41fbd4;
  p25[3] = (uint64_t )0x76f988da831153b5;
  uint64_t *p16 = p24;
  uint64_t *p26 = p24 + (uint32_t )4;
  p16[0] = (uint64_t )0x983e5152ee66dfab;
  p16[1] = (uint64_t )0xa831c66d2db43210;
  p16[2] = (uint64_t )0xb00327c898fb213f;
  p16[3] = (uint64_t )0xbf597fc7beef0ee4;
  p26[0] = (uint64_t )0xc6e00bf33da88fc2;
  p26[1] = (uint64_t )0xd5a79147930aa725;
  p26[2] = (uint64_t )0x06ca6351e003826f;
  p26[3] = (uint64_t )0x142929670a0e6e70;
  uint64_t *p17 = p3;
  uint64_t *p27 = p3 + (uint32_t )8;
  uint64_t *p18 = p17;
  uint64_t *p28 = p17 + (uint32_t )4;
  p18[0] = (uint64_t )0x27b70a8546d22ffc;
  p18[1] = (uint64_t )0x2e1b21385c26c926;
  p18[2] = (uint64_t )0x4d2c6dfc5ac42aed;
  p18[3] = (uint64_t )0x53380d139d95b3df;
  p28[0] = (uint64_t )0x650a73548baf63de;
  p28[1] = (uint64_t )0x766a0abb3c77b2a8;
  p28[2] = (uint64_t )0x81c2c92e47edaee6;
  p28[3] = (uint64_t )0x92722c851482353b;
  uint64_t *p19 = p27;
  uint64_t *p29 = p27 + (uint32_t )4;
  p19[0] = (uint64_t )0xa2bfe8a14cf10364;
  p19[1] = (uint64_t )0xa81a664bbc423001;
  p19[2] = (uint64_t )0xc24b8b70d0f89791;
  p19[3] = (uint64_t )0xc76c51a30654be30;
  p29[0] = (uint64_t )0xd192e819d6ef5218;
  p29[1] = (uint64_t )0xd69906245565a910;
  p29[2] = (uint64_t )0xf40e35855771202a;
  p29[3] = (uint64_t )0x106aa07032bbd1b8;
  uint64_t *p110 = p4;
  uint64_t *p210 = p4 + (uint32_t )8;
  uint64_t *p111 = p110;
  uint64_t *p211 = p110 + (uint32_t )4;
  p111[0] = (uint64_t )0x19a4c116b8d2d0c8;
  p111[1] = (uint64_t )0x1e376c085141ab53;
  p111[2] = (uint64_t )0x2748774cdf8eeb99;
  p111[3] = (uint64_t )0x34b0bcb5e19b48a8;
  p211[0] = (uint64_t )0x391c0cb3c5c95a63;
  p211[1] = (uint64_t )0x4ed8aa4ae3418acb;
  p211[2] = (uint64_t )0x5b9cca4f7763e373;
  p211[3] = (uint64_t )0x682e6ff3d6b2b8a3;
  uint64_t *p112 = p210;
  uint64_t *p212 = p210 + (uint32_t )4;
  p112[0] = (uint64_t )0x748f82ee5defb2fc;
  p112[1] = (uint64_t )0x78a5636f43172f60;
  p112[2] = (uint64_t )0x84c87814a1f0ab72;
  p112[3] = (uint64_t )0x8cc702081a6439ec;
  p212[0] = (uint64_t )0x90befffa23631e28;
  p212[1] = (uint64_t )0xa4506cebde82bde9;
  p212[2] = (uint64_t )0xbef9a3f7b2c67915;
  p212[3] = (uint64_t )0xc67178f2e372532b;
  uint64_t *p113 = p5;
  uint64_t *p213 = p5 + (uint32_t )8;
  uint64_t *p1 = p113;
  uint64_t *p214 = p113 + (uint32_t )4;
  p1[0] = (uint64_t )0xca273eceea26619c;
  p1[1] = (uint64_t )0xd186b8c721c0c207;
  p1[2] = (uint64_t )0xeada7dd6cde0eb1e;
  p1[3] = (uint64_t )0xf57d4f7fee6ed178;
  p214[0] = (uint64_t )0x06f067aa72176fba;
  p214[1] = (uint64_t )0x0a637dc5a2c898a6;
  p214[2] = (uint64_t )0x113f9804bef90dae;
  p214[3] = (uint64_t )0x1b710b35131c471b;
  uint64_t *p114 = p213;
  uint64_t *p215 = p213 + (uint32_t )4;
  p114[0] = (uint64_t )0x28db77f523047d84;
  p114[1] = (uint64_t )0x32caab7b40c72493;
  p114[2] = (uint64_t )0x3c9ebe0a15c9bebc;
  p114[3] = (uint64_t )0x431d67c49c100d4c;
  p215[0] = (uint64_t )0x4cc5d4becb3e42b6;
  p215[1] = (uint64_t )0x597f299cfc657e2a;
  p215[2] = (uint64_t )0x5fcb6fab3ad6faec;
  p215[3] = (uint64_t )0x6c44198c4a475817;
  /* Initial hash value H(0). */
  uint64_t *p115 = h_01;
  uint64_t *p2 = h_01 + (uint32_t )4;
  p115[0] = (uint64_t )0x6a09e667f3bcc908;
  p115[1] = (uint64_t )0xbb67ae8584caa73b;
  p115[2] = (uint64_t )0x3c6ef372fe94f82b;
  p115[3] = (uint64_t )0xa54ff53a5f1d36f1;
  p2[0] = (uint64_t )0x510e527fade682d1;
  p2[1] = (uint64_t )0x9b05688c2b3e6c1f;
  p2[2] = (uint64_t )0x1f83d9abfb41bd6b;
  p2[3] = (uint64_t )0x5be0cd19137e2179;
}
+
/* SHA-512 compression function: absorb one 128-byte block into the hash
   state and increment the block counter.  Expands the 16-word block into
   the 80-word schedule, runs 80 rounds on a working copy of the hash,
   then adds the result back (Davies-Meyer). */
static void Hacl_Hash_SHA2_512_update(uint64_t *state, uint8_t *data)
{
  KRML_CHECK_SIZE((uint64_t )(uint32_t )0, (uint32_t )16);
  uint64_t data_w[16];
  for (uintmax_t _i = 0; _i < (uint32_t )16; ++_i)
    data_w[_i] = (uint64_t )(uint32_t )0;
  Hacl_Hash_Lib_LoadStore_uint64s_from_be_bytes(data_w, data, (uint32_t )16);
  uint64_t *hash_w = state + (uint32_t )160;
  uint64_t *ws_w = state + (uint32_t )80;
  uint64_t *k_w = state;
  uint64_t *counter_w = state + (uint32_t )168;
  for (uint32_t i = (uint32_t )0; i < (uint32_t )16; i = i + (uint32_t )1)
  {
    uint64_t uu____242 = data_w[i];
    ws_w[i] = uu____242;
  }
  /* Message schedule: W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]. */
  for (uint32_t i = (uint32_t )16; i < (uint32_t )80; i = i + (uint32_t )1)
  {
    uint64_t t16 = ws_w[i - (uint32_t )16];
    uint64_t t15 = ws_w[i - (uint32_t )15];
    uint64_t t7 = ws_w[i - (uint32_t )7];
    uint64_t t2 = ws_w[i - (uint32_t )2];
    ws_w[i] =
      ((t2 >> (uint32_t )19 | t2 << (uint32_t )64 - (uint32_t )19)
      ^ (t2 >> (uint32_t )61 | t2 << (uint32_t )64 - (uint32_t )61) ^ t2 >> (uint32_t )6)
      +
        t7
      +
        ((t15 >> (uint32_t )1 | t15 << (uint32_t )64 - (uint32_t )1)
        ^ (t15 >> (uint32_t )8 | t15 << (uint32_t )64 - (uint32_t )8) ^ t15 >> (uint32_t )7)
      + t16;
  }
  uint64_t hash_0[8] = { 0 };
  memcpy(hash_0, hash_w, (uint32_t )8 * sizeof hash_w[0]);
  /* 80 compression rounds on the working variables a..h (Sigma1/Ch, Sigma0/Maj). */
  for (uint32_t i = (uint32_t )0; i < (uint32_t )80; i = i + (uint32_t )1)
  {
    uint64_t a = hash_0[0];
    uint64_t b = hash_0[1];
    uint64_t c = hash_0[2];
    uint64_t d1 = hash_0[3];
    uint64_t e = hash_0[4];
    uint64_t f1 = hash_0[5];
    uint64_t g1 = hash_0[6];
    uint64_t h = hash_0[7];
    uint64_t k_t = k_w[i];
    uint64_t ws_t = ws_w[i];
    uint64_t
    t1 =
      h
      +
        ((e >> (uint32_t )14 | e << (uint32_t )64 - (uint32_t )14)
        ^
          (e >> (uint32_t )18 | e << (uint32_t )64 - (uint32_t )18)
          ^ (e >> (uint32_t )41 | e << (uint32_t )64 - (uint32_t )41))
      + (e & f1 ^ ~e & g1)
      + k_t
      + ws_t;
    uint64_t
    t2 =
      ((a >> (uint32_t )28 | a << (uint32_t )64 - (uint32_t )28)
      ^
        (a >> (uint32_t )34 | a << (uint32_t )64 - (uint32_t )34)
        ^ (a >> (uint32_t )39 | a << (uint32_t )64 - (uint32_t )39))
      + (a & b ^ a & c ^ b & c);
    uint64_t x1 = t1 + t2;
    uint64_t x5 = d1 + t1;
    uint64_t *p1 = hash_0;
    uint64_t *p2 = hash_0 + (uint32_t )4;
    p1[0] = x1;
    p1[1] = a;
    p1[2] = b;
    p1[3] = c;
    p2[0] = x5;
    p2[1] = e;
    p2[2] = f1;
    p2[3] = g1;
  }
  /* Feed-forward: add the round output into the chaining value. */
  for (uint32_t i = (uint32_t )0; i < (uint32_t )8; i = i + (uint32_t )1)
  {
    uint64_t uu____871 = hash_w[i];
    uint64_t uu____874 = hash_0[i];
    uint64_t uu____870 = uu____871 + uu____874;
    hash_w[i] = uu____870;
  }
  uint64_t c0 = counter_w[0];
  uint64_t one1 = (uint64_t )(uint32_t )1;
  counter_w[0] = c0 + one1;
}
+
+static void Hacl_Hash_SHA2_512_update_multi(uint64_t *state, uint8_t *data, uint32_t n1)
+{
+ for (uint32_t i = (uint32_t )0; i < n1; i = i + (uint32_t )1)
+ {
+ uint8_t *b = data + i * (uint32_t )128;
+ Hacl_Hash_SHA2_512_update(state, b);
+ }
+}
+
/* Absorb the final partial block (len1 < 128 bytes): copy the data into a
   zeroed 1- or 2-block staging buffer, append the 0x80 pad byte, and place
   the 128-bit total bit length (big-endian) in the last 16 bytes, per
   FIPS 180-4.  Two blocks are needed when len1 >= 112, since the padding
   no longer fits in one. */
static void Hacl_Hash_SHA2_512_update_last(uint64_t *state, uint8_t *data, uint64_t len1)
{
  uint8_t blocks[256] = { 0 };
  uint32_t nb;
  if (len1 < (uint64_t )112)
    nb = (uint32_t )1;
  else
    nb = (uint32_t )2;
  uint8_t *final_blocks;
  if (len1 < (uint64_t )112)
    final_blocks = blocks + (uint32_t )128; /* one block: use upper half */
  else
    final_blocks = blocks;
  memcpy(final_blocks, data, (uint32_t )len1 * sizeof data[0]);
  uint64_t n1 = state[168]; /* blocks hashed so far */
  uint8_t *padding = final_blocks + (uint32_t )len1;
  /* total message length in bits = (128*n1 + len1) * 8, as a 128-bit value */
  FStar_UInt128_t
  encodedlen =
    FStar_UInt128_shift_left(FStar_UInt128_add(FStar_UInt128_mul_wide(n1,
          (uint64_t )(uint32_t )128),
        FStar_Int_Cast_Full_uint64_to_uint128(len1)),
      (uint32_t )3);
  uint32_t
  pad0len = ((uint32_t )256 - ((uint32_t )len1 + (uint32_t )16 + (uint32_t )1)) % (uint32_t )128;
  uint8_t *buf1 = padding;
  (void )(padding + (uint32_t )1);
  uint8_t *buf2 = padding + (uint32_t )1 + pad0len;
  buf1[0] = (uint8_t )0x80; /* mandatory pad marker */
  store128_be(buf2, encodedlen);
  Hacl_Hash_SHA2_512_update_multi(state, final_blocks, nb);
}
+
/* Emit the 64-byte digest: serialize the 8 chaining words big-endian. */
static void Hacl_Hash_SHA2_512_finish(uint64_t *state, uint8_t *hash1)
{
  uint64_t *hash_w = state + (uint32_t )160;
  Hacl_Hash_Lib_LoadStore_uint64s_to_be_bytes(hash1, hash_w, (uint32_t )8);
}
+
/* One-shot SHA-512: init, hash all whole 128-byte blocks, pad and hash the
   remainder, then write the 64-byte digest to hash1. */
static void Hacl_Hash_SHA2_512_hash(uint8_t *hash1, uint8_t *input, uint32_t len1)
{
  KRML_CHECK_SIZE((uint64_t )(uint32_t )0, (uint32_t )169);
  uint64_t state[169];
  for (uintmax_t _i = 0; _i < (uint32_t )169; ++_i)
    state[_i] = (uint64_t )(uint32_t )0;
  uint32_t n1 = len1 / (uint32_t )128; /* number of full blocks */
  uint32_t r = len1 % (uint32_t )128;  /* trailing bytes */
  uint8_t *input_blocks = input;
  uint8_t *input_last = input + n1 * (uint32_t )128;
  Hacl_Hash_SHA2_512_init(state);
  Hacl_Hash_SHA2_512_update_multi(state, input_blocks, n1);
  Hacl_Hash_SHA2_512_update_last(state, input_last, (uint64_t )r);
  Hacl_Hash_SHA2_512_finish(state, hash1);
}
+
/* Short-name alias for the one-shot SHA-512 above. */
static void SHA2_512_hash(uint8_t *hash1, uint8_t *input, uint32_t len1)
{
  Hacl_Hash_SHA2_512_hash(hash1, input, len1);
}
+
/* Ed25519 secret-key expansion (RFC 8032): expanded <- SHA-512(secret),
   then clamp the low half (the scalar): clear the 3 low bits, clear
   bit 255 and set bit 254.  The upper 32 bytes (the nonce prefix)
   are left as-is. */
static void Hacl_Impl_Ed25519_SecretExpand_secret_expand(uint8_t *expanded, uint8_t *secret)
{
  SHA2_512_hash(expanded, secret, (uint32_t )32);
  uint8_t *h_low = expanded;
  (void )(expanded + (uint32_t )32);
  uint8_t h_low0 = h_low[0];
  uint8_t h_low31 = h_low[31];
  h_low[0] = h_low0 & (uint8_t )0xf8;
  h_low[31] = h_low31 & (uint8_t )127 | (uint8_t )64;
}
+
/* result <- scalar * G, where G is the Ed25519 base point. */
static void Hacl_Impl_Ed25519_SecretToPublic_point_mul_g(uint64_t *result, uint8_t *scalar)
{
  uint64_t g1[20] = { 0 };
  Hacl_Impl_Ed25519_G_make_g(g1);
  Hacl_Impl_Ed25519_Ladder_point_mul(result, scalar, g1);
}
+
/* Compute the public key from an already-expanded secret: out <- compress(a*G),
   where a is the clamped low half of expanded_secret.  `secret` is unused
   here (extraction residue). */
static void
Hacl_Impl_Ed25519_SecretToPublic_secret_to_public_(
  uint8_t *out,
  uint8_t *secret,
  uint8_t *expanded_secret
)
{
  uint8_t *a = expanded_secret; /* clamped scalar = low 32 bytes */
  uint64_t res[20] = { 0 };
  Hacl_Impl_Ed25519_SecretToPublic_point_mul_g(res, a);
  Hacl_Impl_Ed25519_PointCompress_point_compress(out, res);
}
+
/* Ed25519 public-key derivation: expand (hash + clamp) the 32-byte seed,
   then multiply the base point and compress the result into `out`. */
static void Hacl_Impl_Ed25519_SecretToPublic_secret_to_public(uint8_t *out, uint8_t *secret)
{
  uint8_t expanded[64] = { 0 };
  Hacl_Impl_Ed25519_SecretExpand_secret_expand(expanded, secret);
  Hacl_Impl_Ed25519_SecretToPublic_secret_to_public_(out, secret, expanded);
}
+
+/* Returns true iff s >= L, where s is 5 little-endian 56-bit limbs and
+ * L = 2^252 + 27742317777372353535851937790883648493 is the Ed25519 group
+ * order; the constants below are L's limbs.  Limb 3 of L is 0, so only the
+ * `>` branch is needed there (s3 < 0 is impossible for an unsigned limb).
+ * Branchy comparison: operates on public signature data, not secrets. */
+static bool Hacl_Impl_Ed25519_PointEqual_gte_q(uint64_t *s)
+{
+  uint64_t s0 = s[0];
+  uint64_t s1 = s[1];
+  uint64_t s2 = s[2];
+  uint64_t s3 = s[3];
+  uint64_t s4 = s[4];
+  if (s4 > (uint64_t )0x00000010000000)
+    return true;
+  else if (s4 < (uint64_t )0x00000010000000)
+    return false;
+  else if (s3 > (uint64_t )0x00000000000000)
+    return true;
+  else if (s2 > (uint64_t )0x000000000014de)
+    return true;
+  else if (s2 < (uint64_t )0x000000000014de)
+    return false;
+  else if (s1 > (uint64_t )0xf9dea2f79cd658)
+    return true;
+  else if (s1 < (uint64_t )0xf9dea2f79cd658)
+    return false;
+  else if (s0 >= (uint64_t )0x12631a5cf5d3ed)
+    return true;
+  else
+    return false;
+}
+
+/* Limb-wise equality of two 5-limb field elements.  Callers pass fully
+ * reduced values (see the Hacl_Bignum25519_reduce calls at the call
+ * sites), so representation equality implies value equality. */
+static bool Hacl_Impl_Ed25519_PointEqual_eq(uint64_t *a, uint64_t *b)
+{
+  uint64_t a0 = a[0];
+  uint64_t a1 = a[1];
+  uint64_t a2 = a[2];
+  uint64_t a3 = a[3];
+  uint64_t a4 = a[4];
+  uint64_t b0 = b[0];
+  uint64_t b1 = b[1];
+  uint64_t b2 = b[2];
+  uint64_t b3 = b[3];
+  uint64_t b4 = b[4];
+  bool z = a0 == b0 && a1 == b1 && a2 == b2 && a3 == b3 && a4 == b4;
+  return z;
+}
+
+/* Projective x-coordinate equality: computes x_p*z_q and x_q*z_p into the
+ * first two 5-limb slots of tmp (tmp needs >= 20 limbs; slots 2-3 are
+ * reserved for point_equal_2), reduces both, and compares them. */
+static bool
+Hacl_Impl_Ed25519_PointEqual_point_equal_1(uint64_t *p, uint64_t *q1, uint64_t *tmp)
+{
+  uint64_t *pxqz = tmp;
+  uint64_t *qxpz = tmp + (uint32_t )5;
+  (void )(tmp + (uint32_t )10);
+  (void )(tmp + (uint32_t )15);
+  Hacl_Bignum25519_fmul(pxqz,
+    Hacl_Impl_Ed25519_ExtPoint_getx(p),
+    Hacl_Impl_Ed25519_ExtPoint_getz(q1));
+  Hacl_Bignum25519_reduce(pxqz);
+  Hacl_Bignum25519_fmul(qxpz,
+    Hacl_Impl_Ed25519_ExtPoint_getx(q1),
+    Hacl_Impl_Ed25519_ExtPoint_getz(p));
+  Hacl_Bignum25519_reduce(qxpz);
+  bool b = Hacl_Impl_Ed25519_PointEqual_eq(pxqz, qxpz);
+  return b;
+}
+
+/* Projective y-coordinate equality: y_p*z_q vs y_q*z_p, using the upper
+ * two 5-limb slots of the same 20-limb tmp as point_equal_1. */
+static bool
+Hacl_Impl_Ed25519_PointEqual_point_equal_2(uint64_t *p, uint64_t *q1, uint64_t *tmp)
+{
+  (void )(tmp + (uint32_t )5);
+  uint64_t *pyqz = tmp + (uint32_t )10;
+  uint64_t *qypz = tmp + (uint32_t )15;
+  Hacl_Bignum25519_fmul(pyqz,
+    Hacl_Impl_Ed25519_ExtPoint_gety(p),
+    Hacl_Impl_Ed25519_ExtPoint_getz(q1));
+  Hacl_Bignum25519_reduce(pyqz);
+  Hacl_Bignum25519_fmul(qypz,
+    Hacl_Impl_Ed25519_ExtPoint_gety(q1),
+    Hacl_Impl_Ed25519_ExtPoint_getz(p));
+  Hacl_Bignum25519_reduce(qypz);
+  bool b = Hacl_Impl_Ed25519_PointEqual_eq(pyqz, qypz);
+  return b;
+}
+
+/* Two points in extended coordinates are equal iff both their projective
+ * x and y coordinates match; y is only checked when x already matched. */
+static bool Hacl_Impl_Ed25519_PointEqual_point_equal_(uint64_t *p, uint64_t *q1, uint64_t *tmp)
+{
+  bool b = Hacl_Impl_Ed25519_PointEqual_point_equal_1(p, q1, tmp);
+  if (b == true)
+    return Hacl_Impl_Ed25519_PointEqual_point_equal_2(p, q1, tmp);
+  else
+    return false;
+}
+
+/* Public entry: point equality with a stack-allocated 20-limb scratch. */
+static bool Hacl_Impl_Ed25519_PointEqual_point_equal(uint64_t *p, uint64_t *q1)
+{
+  uint64_t tmp[20] = { 0 };
+  bool res = Hacl_Impl_Ed25519_PointEqual_point_equal_(p, q1, tmp);
+  return res;
+}
+
+/* Parses 64 little-endian bytes into 10 limbs of 56 bits each
+ * (9 * 56 + 8 = 512 bits).  Limbs 0..8 come from overlapping 64-bit
+ * loads at 7-byte stride, each masked to the low 56 bits; limb 9 is the
+ * final single byte b[63].  The overlapping unaligned loads are
+ * intentional — load64_le handles unaligned access. */
+static void Hacl_Impl_Load56_load_64_bytes(uint64_t *out, uint8_t *b)
+{
+  uint8_t *b80 = b;
+  uint64_t z = load64_le(b80);
+  uint64_t z_ = z & (uint64_t )0xffffffffffffff;
+  uint64_t b0 = z_;
+  uint8_t *b81 = b + (uint32_t )7;
+  uint64_t z0 = load64_le(b81);
+  uint64_t z_0 = z0 & (uint64_t )0xffffffffffffff;
+  uint64_t b1 = z_0;
+  uint8_t *b82 = b + (uint32_t )14;
+  uint64_t z1 = load64_le(b82);
+  uint64_t z_1 = z1 & (uint64_t )0xffffffffffffff;
+  uint64_t b2 = z_1;
+  uint8_t *b83 = b + (uint32_t )21;
+  uint64_t z2 = load64_le(b83);
+  uint64_t z_2 = z2 & (uint64_t )0xffffffffffffff;
+  uint64_t b3 = z_2;
+  uint8_t *b84 = b + (uint32_t )28;
+  uint64_t z3 = load64_le(b84);
+  uint64_t z_3 = z3 & (uint64_t )0xffffffffffffff;
+  uint64_t b4 = z_3;
+  uint8_t *b85 = b + (uint32_t )35;
+  uint64_t z4 = load64_le(b85);
+  uint64_t z_4 = z4 & (uint64_t )0xffffffffffffff;
+  uint64_t b5 = z_4;
+  uint8_t *b86 = b + (uint32_t )42;
+  uint64_t z5 = load64_le(b86);
+  uint64_t z_5 = z5 & (uint64_t )0xffffffffffffff;
+  uint64_t b6 = z_5;
+  uint8_t *b87 = b + (uint32_t )49;
+  uint64_t z6 = load64_le(b87);
+  uint64_t z_6 = z6 & (uint64_t )0xffffffffffffff;
+  uint64_t b7 = z_6;
+  uint8_t *b8 = b + (uint32_t )56;
+  uint64_t z7 = load64_le(b8);
+  uint64_t z_7 = z7 & (uint64_t )0xffffffffffffff;
+  uint64_t b88 = z_7;
+  uint8_t b63 = b[63];
+  uint64_t b9 = (uint64_t )b63;
+  Hacl_Lib_Create64_make_h64_10(out, b0, b1, b2, b3, b4, b5, b6, b7, b88, b9);
+}
+
+/* Parses 32 little-endian bytes into 5 limbs: four 56-bit limbs from
+ * overlapping 64-bit loads at 7-byte stride, then a final 32-bit load
+ * for the remaining bytes (4 * 56 + 32 = 256 bits). */
+static void Hacl_Impl_Load56_load_32_bytes(uint64_t *out, uint8_t *b)
+{
+  uint8_t *b80 = b;
+  uint64_t z = load64_le(b80);
+  uint64_t z_ = z & (uint64_t )0xffffffffffffff;
+  uint64_t b0 = z_;
+  uint8_t *b81 = b + (uint32_t )7;
+  uint64_t z0 = load64_le(b81);
+  uint64_t z_0 = z0 & (uint64_t )0xffffffffffffff;
+  uint64_t b1 = z_0;
+  uint8_t *b82 = b + (uint32_t )14;
+  uint64_t z1 = load64_le(b82);
+  uint64_t z_1 = z1 & (uint64_t )0xffffffffffffff;
+  uint64_t b2 = z_1;
+  uint8_t *b8 = b + (uint32_t )21;
+  uint64_t z2 = load64_le(b8);
+  uint64_t z_2 = z2 & (uint64_t )0xffffffffffffff;
+  uint64_t b3 = z_2;
+  uint8_t *x0 = b + (uint32_t )28;
+  uint32_t b4 = load32_le(x0);
+  uint64_t b41 = (uint64_t )b4;
+  Hacl_Lib_Create64_make_h64_5(out, b0, b1, b2, b3, b41);
+}
+
+/* out = z^(2^252 - 2) mod p, delegated to the curve25519 crecip ladder;
+ * used as the candidate square root x = u^((p+3)/8) in recover_x. */
+inline static void Hacl_Impl_Ed25519_Pow2_252m2_pow2_252m2(uint64_t *out, uint64_t *z)
+{
+  Hacl_Bignum_Crecip_crecip_(out, z);
+}
+
+/* True iff all 5 limbs of x are zero.  Callers reduce x first, so this
+ * tests the field element itself, not just one representation. */
+static bool Hacl_Impl_Ed25519_RecoverX_is_0(uint64_t *x)
+{
+  uint64_t x0 = x[0];
+  uint64_t x1 = x[1];
+  uint64_t x2 = x[2];
+  uint64_t x3 = x[3];
+  uint64_t x4 = x[4];
+  return
+    x0
+    == (uint64_t )0
+    && x1 == (uint64_t )0
+    && x2 == (uint64_t )0
+    && x3 == (uint64_t )0
+    && x4 == (uint64_t )0;
+}
+
+/* Final sign fix of x-recovery: x3 (tmp[5..9]) holds the square root
+ * candidate.  If its parity (low bit) disagrees with the requested sign
+ * bit, replace it by its negation mod p (fdifference against a zeroed
+ * t0 — NOTE(review): assumes fdifference computes x3 := t0 - x3 mod p;
+ * verify against Hacl_Bignum25519), then copy the result into x. */
+static void
+Hacl_Impl_Ed25519_RecoverX_recover_x_step_5(uint64_t *x, uint64_t sign1, uint64_t *tmp)
+{
+  uint64_t *x3 = tmp + (uint32_t )5;
+  uint64_t *t0 = tmp + (uint32_t )10;
+  uint64_t x0 = x3[0];
+  uint64_t x00 = x0 & (uint64_t )1;
+  if (!(x00 == sign1))
+  {
+    uint64_t zero1 = (uint64_t )0;
+    Hacl_Lib_Create64_make_h64_5(t0, zero1, zero1, zero1, zero1, zero1);
+    Hacl_Bignum25519_fdifference(x3, t0);
+    Hacl_Bignum25519_reduce_513(x3);
+    Hacl_Bignum25519_reduce(x3);
+  }
+  memcpy(x, x3, (uint32_t )5 * sizeof x3[0]);
+}
+
+/* RFC 8032 x-coordinate recovery: given y and a sign bit, find x with
+ * x^2 = (y^2 - 1) / (d*y^2 + 1) mod p, returning false when y >= p or no
+ * root exists.  tmp is a 20-limb scratch laid out as
+ * [x2 | x3(candidate) | t0 | t1].  Not constant-time: decompression
+ * operates on public data. */
+static bool
+Hacl_Impl_Ed25519_RecoverX_recover_x_(uint64_t *x, uint64_t *y, uint64_t sign1, uint64_t *tmp)
+{
+  uint64_t *x20 = tmp;
+  uint64_t x0 = y[0];
+  uint64_t x1 = y[1];
+  uint64_t x2 = y[2];
+  uint64_t x30 = y[3];
+  uint64_t x4 = y[4];
+  /* Reject y >= p = 2^255 - 19 (limbs compared against p's 51-bit limbs). */
+  bool
+  b =
+    x0
+    >= (uint64_t )0x7ffffffffffed
+    && x1 == (uint64_t )0x7ffffffffffff
+    && x2 == (uint64_t )0x7ffffffffffff
+    && x30 == (uint64_t )0x7ffffffffffff
+    && x4 == (uint64_t )0x7ffffffffffff;
+  if (b)
+    return false;
+  else
+  {
+    /* x2 := (y^2 - 1) * inverse(d*y^2 + 1). */
+    uint64_t tmp0[25] = { 0 };
+    uint64_t *one10 = tmp0;
+    uint64_t *y2 = tmp0 + (uint32_t )5;
+    uint64_t *dyyi = tmp0 + (uint32_t )10;
+    uint64_t *dyy = tmp0 + (uint32_t )15;
+    uint64_t zero10 = (uint64_t )0;
+    uint64_t one1 = (uint64_t )1;
+    Hacl_Lib_Create64_make_h64_5(one10, one1, zero10, zero10, zero10, zero10);
+    Hacl_Bignum25519_fsquare(y2, y);
+    Hacl_Bignum25519_times_d(dyy, y2);
+    Hacl_Bignum25519_fsum(dyy, one10);
+    Hacl_Bignum25519_reduce_513(dyy);
+    Hacl_Bignum25519_inverse(dyyi, dyy);
+    Hacl_Bignum25519_fdifference(one10, y2);
+    Hacl_Bignum25519_fmul(x20, dyyi, one10);
+    Hacl_Bignum25519_reduce(x20);
+    bool x2_is_0 = Hacl_Impl_Ed25519_RecoverX_is_0(x20);
+    /* z encodes the outcome: 0 = fail (x^2 = 0 but sign bit set),
+     * 1 = done (x = 0), 2 = continue with the square-root computation. */
+    uint8_t z;
+    if (x2_is_0)
+    {
+      uint8_t ite;
+      if (sign1 == (uint64_t )0)
+      {
+        uint64_t zero1 = (uint64_t )0;
+        Hacl_Lib_Create64_make_h64_5(x, zero1, zero1, zero1, zero1, zero1);
+        ite = (uint8_t )1;
+      }
+      else
+        ite = (uint8_t )0;
+      z = ite;
+    }
+    else
+      z = (uint8_t )2;
+    if (z == (uint8_t )0)
+      return false;
+    else if (z == (uint8_t )1)
+      return true;
+    else
+    {
+      /* Candidate root x3 = (x^2)^((p+3)/8); if x3^2 != x^2, multiply by
+       * sqrt(-1) (constant below) to get the other candidate. */
+      uint64_t *x2 = tmp;
+      uint64_t *x30 = tmp + (uint32_t )5;
+      uint64_t *t00 = tmp + (uint32_t )10;
+      uint64_t *t10 = tmp + (uint32_t )15;
+      Hacl_Impl_Ed25519_Pow2_252m2_pow2_252m2(x30, x2);
+      Hacl_Bignum25519_fsquare(t00, x30);
+      memcpy(t10, x2, (uint32_t )5 * sizeof x2[0]);
+      Hacl_Bignum25519_fdifference(t10, t00);
+      Hacl_Bignum25519_reduce_513(t10);
+      Hacl_Bignum25519_reduce(t10);
+      bool t1_is_0 = Hacl_Impl_Ed25519_RecoverX_is_0(t10);
+      if (t1_is_0)
+      {
+
+      }
+      else
+      {
+        uint64_t sqrt_m1[5] = { 0 };
+        Hacl_Lib_Create64_make_h64_5(sqrt_m1,
+          (uint64_t )0x00061b274a0ea0b0,
+          (uint64_t )0x0000d5a5fc8f189d,
+          (uint64_t )0x0007ef5e9cbd0c60,
+          (uint64_t )0x00078595a6804c9e,
+          (uint64_t )0x0002b8324804fc1d);
+        Hacl_Bignum25519_fmul(x30, x30, sqrt_m1);
+      }
+      Hacl_Bignum25519_reduce(x30);
+      /* Verify x3^2 == x^2; if not, x^2 was a non-residue: no valid x. */
+      uint64_t *x20 = tmp;
+      uint64_t *x3 = tmp + (uint32_t )5;
+      uint64_t *t0 = tmp + (uint32_t )10;
+      uint64_t *t1 = tmp + (uint32_t )15;
+      Hacl_Bignum25519_fsquare(t0, x3);
+      memcpy(t1, x20, (uint32_t )5 * sizeof x20[0]);
+      Hacl_Bignum25519_fdifference(t1, t0);
+      Hacl_Bignum25519_reduce_513(t1);
+      Hacl_Bignum25519_reduce(t1);
+      bool z1 = Hacl_Impl_Ed25519_RecoverX_is_0(t1);
+      if (z1 == false)
+        return false;
+      else
+      {
+        Hacl_Impl_Ed25519_RecoverX_recover_x_step_5(x, sign1, tmp);
+        return true;
+      }
+    }
+  }
+}
+
+/* Public entry for x-recovery with a stack-allocated 20-limb scratch. */
+static bool Hacl_Impl_Ed25519_RecoverX_recover_x(uint64_t *x, uint64_t *y, uint64_t sign1)
+{
+  uint64_t tmp[20] = { 0 };
+  bool res = Hacl_Impl_Ed25519_RecoverX_recover_x_(x, y, sign1, tmp);
+  return res;
+}
+
+/* Expands 32 bytes into 5 limbs of 51 bits via the curve25519 routine
+ * (which, per its use here, drops the top bit of byte 31 — the sign bit
+ * is read separately by the caller). */
+static void Hacl_Impl_Load51_load_51(uint64_t *output, uint8_t *input)
+{
+  Hacl_EC_Format_fexpand(output, input);
+}
+
+/* Decompresses a 32-byte encoded point s into extended coordinates
+ * out = (x, y, z=1, t=x*y).  y is the low 255 bits of s, the sign of x
+ * is the top bit of byte 31.  Returns false when x-recovery fails
+ * (invalid encoding); out is only written on success. */
+static bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *s)
+{
+  uint64_t tmp[10] = { 0 };
+  uint64_t *y0 = tmp;
+  uint64_t *x0 = tmp + (uint32_t )5;
+  uint64_t *y = tmp;
+  uint64_t *x = tmp + (uint32_t )5;
+  uint8_t s31 = s[31];
+  uint64_t sign1 = (uint64_t )(s31 >> (uint32_t )7);
+  Hacl_Impl_Load51_load_51(y, s);
+  bool z = Hacl_Impl_Ed25519_RecoverX_recover_x(x, y, sign1);
+  bool z0 = z;
+  bool res;
+  if (z0 == false)
+    res = false;
+  else
+  {
+    uint64_t *outx = Hacl_Impl_Ed25519_ExtPoint_getx(out);
+    uint64_t *outy = Hacl_Impl_Ed25519_ExtPoint_gety(out);
+    uint64_t *outz = Hacl_Impl_Ed25519_ExtPoint_getz(out);
+    uint64_t *outt = Hacl_Impl_Ed25519_ExtPoint_gett(out);
+    memcpy(outx, x0, (uint32_t )5 * sizeof x0[0]);
+    memcpy(outy, y0, (uint32_t )5 * sizeof y0[0]);
+    uint64_t zero1 = (uint64_t )0;
+    uint64_t one1 = (uint64_t )1;
+    Hacl_Lib_Create64_make_h64_5(outz, one1, zero1, zero1, zero1, zero1);
+    Hacl_Bignum25519_fmul(outt, x0, y0);
+    res = true;
+  }
+  return res;
+}
+
+/* Inverse of load_32_bytes: serializes 5 limbs (4 x 56 bits + 32 bits)
+ * into 32 little-endian bytes.  The 64-bit stores at 7-byte stride
+ * overlap on purpose: each subsequent store overwrites the spill byte
+ * (bits 56..63, zero for in-range limbs) of the previous one, and the
+ * final 32-bit store caps the buffer at exactly 32 bytes. */
+static void Hacl_Impl_Store56_store_56(uint8_t *out, uint64_t *b)
+{
+  uint64_t b0 = b[0];
+  uint64_t b1 = b[1];
+  uint64_t b2 = b[2];
+  uint64_t b3 = b[3];
+  uint64_t b4 = b[4];
+  uint32_t b41 = (uint32_t )b4;
+  uint8_t *b8 = out;
+  store64_le(b8, b0);
+  uint8_t *b80 = out + (uint32_t )7;
+  store64_le(b80, b1);
+  uint8_t *b81 = out + (uint32_t )14;
+  store64_le(b81, b2);
+  uint8_t *b82 = out + (uint32_t )21;
+  store64_le(b82, b3);
+  uint8_t *x0 = out + (uint32_t )28;
+  store32_le(x0, b41);
+}
+
+/* out = SHA-512(block[0..127] || msg[0..len1-1]): feeds one prepared
+ * 128-byte block, then len1 >> 7 full blocks of msg, then the remaining
+ * len1 & 127 tail bytes through update_last. */
+static void
+Hacl_Impl_SHA512_Ed25519_2_hash_block_and_rest(
+  uint8_t *out,
+  uint8_t *block,
+  uint8_t *msg,
+  uint32_t len1
+)
+{
+  uint32_t nblocks = len1 >> (uint32_t )7;
+  uint64_t rest = (uint64_t )(len1 & (uint32_t )127);
+  uint64_t st[169] = { 0 };
+  Hacl_Hash_SHA2_512_init(st);
+  Hacl_Hash_SHA2_512_update(st, block);
+  Hacl_Hash_SHA2_512_update_multi(st, msg, nblocks);
+  Hacl_Hash_SHA2_512_update_last(st, msg + (uint32_t )128 * nblocks, rest);
+  Hacl_Hash_SHA2_512_finish(st, out);
+}
+
+/* Byte copy of len1 bytes (sizeof input[0] == 1); regions must not overlap. */
+static void
+Hacl_Impl_SHA512_Ed25519_1_copy_bytes(uint8_t *output, uint8_t *input, uint32_t len1)
+{
+  memcpy(output, input, len1 * sizeof input[0]);
+}
+
+/* out = pre[0..31] || msg[0..len1-1]; out must hold 32 + len1 bytes. */
+static void
+Hacl_Impl_SHA512_Ed25519_1_concat_2(uint8_t *out, uint8_t *pre, uint8_t *msg, uint32_t len1)
+{
+  Hacl_Impl_SHA512_Ed25519_1_copy_bytes(out, pre, (uint32_t )32);
+  Hacl_Impl_SHA512_Ed25519_1_copy_bytes(out + (uint32_t )32, msg, len1);
+}
+
+/* out = pre[0..31] || pre2[0..31] || msg[0..len1-1]; out must hold
+ * 64 + len1 bytes. */
+static void
+Hacl_Impl_SHA512_Ed25519_1_concat_3(
+  uint8_t *out,
+  uint8_t *pre,
+  uint8_t *pre2,
+  uint8_t *msg,
+  uint32_t len1
+)
+{
+  Hacl_Impl_SHA512_Ed25519_1_copy_bytes(out, pre, (uint32_t )32);
+  Hacl_Impl_SHA512_Ed25519_1_copy_bytes(out + (uint32_t )32, pre2, (uint32_t )32);
+  Hacl_Impl_SHA512_Ed25519_1_copy_bytes(out + (uint32_t )64, msg, len1);
+}
+
+/* h = SHA-512(prefix[0..31] || input[0..len1-1]) for short messages.
+ * Caller (sha512_pre_msg) guarantees len1 <= 96 so the concatenation
+ * fits the 128-byte stack block. */
+static void
+Hacl_Impl_SHA512_Ed25519_1_sha512_pre_msg_1(
+  uint8_t *h,
+  uint8_t *prefix,
+  uint8_t *input,
+  uint32_t len1
+)
+{
+  uint8_t block[128] = { 0 };
+  uint8_t *block_ = block;
+  Hacl_Impl_SHA512_Ed25519_1_concat_2(block_, prefix, input, len1);
+  Hacl_Hash_SHA2_512_hash(h, block_, len1 + (uint32_t )32);
+}
+
+/* h = SHA-512(prefix || prefix2 || input) for short messages.  Caller
+ * guarantees len1 <= 64 so 64 + len1 fits the 128-byte stack block. */
+static void
+Hacl_Impl_SHA512_Ed25519_1_sha512_pre_pre2_msg_1(
+  uint8_t *h,
+  uint8_t *prefix,
+  uint8_t *prefix2,
+  uint8_t *input,
+  uint32_t len1
+)
+{
+  uint8_t block[128] = { 0 };
+  uint8_t *block_ = block;
+  Hacl_Impl_SHA512_Ed25519_1_concat_3(block_, prefix, prefix2, input, len1);
+  Hacl_Hash_SHA2_512_hash(h, block_, len1 + (uint32_t )64);
+}
+
+/* h = SHA-512(prefix || input) for len1 > 96: builds one full 128-byte
+ * block from the 32-byte prefix plus the first 96 input bytes, then
+ * streams the remaining len1 - 96 bytes. */
+static void
+Hacl_Impl_SHA512_Ed25519_3_sha512_pre_msg_2(
+  uint8_t *h,
+  uint8_t *prefix,
+  uint8_t *input,
+  uint32_t len1
+)
+{
+  uint8_t *input11 = input;
+  uint8_t *input21 = input + (uint32_t )96;
+  uint8_t block[128] = { 0 };
+  Hacl_Impl_SHA512_Ed25519_1_concat_2(block, prefix, input11, (uint32_t )96);
+  Hacl_Impl_SHA512_Ed25519_2_hash_block_and_rest(h, block, input21, len1 - (uint32_t )96);
+}
+
+/* h = SHA-512(prefix[0..31] || input[0..len1-1]); dispatches on whether
+ * the concatenation fits a single 128-byte block (len1 <= 96). */
+static void
+Hacl_Impl_SHA512_Ed25519_3_sha512_pre_msg(
+  uint8_t *h,
+  uint8_t *prefix,
+  uint8_t *input,
+  uint32_t len1
+)
+{
+  if (len1 <= (uint32_t )96)
+    Hacl_Impl_SHA512_Ed25519_1_sha512_pre_msg_1(h, prefix, input, len1);
+  else
+    Hacl_Impl_SHA512_Ed25519_3_sha512_pre_msg_2(h, prefix, input, len1);
+}
+
+/* h = SHA-512(prefix || prefix2 || input) for len1 > 64: one full block
+ * from the two 32-byte prefixes plus the first 64 input bytes, then the
+ * remaining len1 - 64 bytes streamed. */
+static void
+Hacl_Impl_SHA512_Ed25519_3_sha512_pre_pre2_msg_2(
+  uint8_t *h,
+  uint8_t *prefix,
+  uint8_t *prefix2,
+  uint8_t *input,
+  uint32_t len1
+)
+{
+  uint8_t *input11 = input;
+  uint8_t *input21 = input + (uint32_t )64;
+  uint8_t block[128] = { 0 };
+  Hacl_Impl_SHA512_Ed25519_1_concat_3(block, prefix, prefix2, input11, (uint32_t )64);
+  Hacl_Impl_SHA512_Ed25519_2_hash_block_and_rest(h, block, input21, len1 - (uint32_t )64);
+}
+
+/* h = SHA-512(prefix || prefix2 || input); single-block path iff
+ * len1 <= 64 (so 64 + len1 <= 128). */
+static void
+Hacl_Impl_SHA512_Ed25519_3_sha512_pre_pre2_msg(
+  uint8_t *h,
+  uint8_t *prefix,
+  uint8_t *prefix2,
+  uint8_t *input,
+  uint32_t len1
+)
+{
+  if (len1 <= (uint32_t )64)
+    Hacl_Impl_SHA512_Ed25519_1_sha512_pre_pre2_msg_1(h, prefix, prefix2, input, len1);
+  else
+    Hacl_Impl_SHA512_Ed25519_3_sha512_pre_pre2_msg_2(h, prefix, prefix2, input, len1);
+}
+
+/* Namespace forwarder (generated layering): delegates to the _3 variant. */
+static void
+Hacl_Impl_SHA512_Ed25519_sha512_pre_msg(
+  uint8_t *h,
+  uint8_t *prefix,
+  uint8_t *input,
+  uint32_t len1
+)
+{
+  Hacl_Impl_SHA512_Ed25519_3_sha512_pre_msg(h, prefix, input, len1);
+}
+
+/* Namespace forwarder (generated layering): delegates to the _3 variant. */
+static void
+Hacl_Impl_SHA512_Ed25519_sha512_pre_pre2_msg(
+  uint8_t *h,
+  uint8_t *prefix,
+  uint8_t *prefix2,
+  uint8_t *input,
+  uint32_t len1
+)
+{
+  Hacl_Impl_SHA512_Ed25519_3_sha512_pre_pre2_msg(h, prefix, prefix2, input, len1);
+}
+
+/* Namespace forwarder: h = SHA-512(prefix[0..31] || input[0..len1-1]). */
+static void
+Hacl_Impl_Sha512_sha512_pre_msg(uint8_t *h, uint8_t *prefix, uint8_t *input, uint32_t len1)
+{
+  Hacl_Impl_SHA512_Ed25519_sha512_pre_msg(h, prefix, input, len1);
+}
+
+/* Namespace forwarder: h = SHA-512(prefix || prefix2 || input). */
+static void
+Hacl_Impl_Sha512_sha512_pre_pre2_msg(
+  uint8_t *h,
+  uint8_t *prefix,
+  uint8_t *prefix2,
+  uint8_t *input,
+  uint32_t len1
+)
+{
+  Hacl_Impl_SHA512_Ed25519_sha512_pre_pre2_msg(h, prefix, prefix2, input, len1);
+}
+
+/* Fills a 9-element uint128 buffer from 9 scalar arguments (generated
+ * unrolled store helper; FStar_UInt128_t may be emulated when compiled
+ * with KRML_NOUINT128). */
+static void
+Hacl_Lib_Create128_make_h128_9(
+  FStar_UInt128_t *b,
+  FStar_UInt128_t s0,
+  FStar_UInt128_t s1,
+  FStar_UInt128_t s2,
+  FStar_UInt128_t s3,
+  FStar_UInt128_t s4,
+  FStar_UInt128_t s5,
+  FStar_UInt128_t s6,
+  FStar_UInt128_t s7,
+  FStar_UInt128_t s8
+)
+{
+  b[0] = s0;
+  b[1] = s1;
+  b[2] = s2;
+  b[3] = s3;
+  b[4] = s4;
+  b[5] = s5;
+  b[6] = s6;
+  b[7] = s7;
+  b[8] = s8;
+}
+
+/* Loads m1 with the Ed25519 group order L = 2^252 + 2774231777737235353
+ * 5851937790883648493 as 5 little-endian 56-bit limbs (same constants as
+ * gte_q and subm_conditional). */
+static void Hacl_Impl_BignumQ_Mul_make_m(uint64_t *m1)
+{
+  Hacl_Lib_Create64_make_h64_5(m1,
+    (uint64_t )0x12631a5cf5d3ed,
+    (uint64_t )0xf9dea2f79cd658,
+    (uint64_t )0x000000000014de,
+    (uint64_t )0x00000000000000,
+    (uint64_t )0x00000010000000);
+}
+
+/* Loads m1 with the precomputed Barrett constant mu — given its use in
+ * barrett_reduction__1 (q1 = t / 2^248, then (q1 * mu) / 2^264),
+ * presumably mu = floor(2^512 / L); confirm against the HACL* spec. */
+static void Hacl_Impl_BignumQ_Mul_make_mu(uint64_t *m1)
+{
+  Hacl_Lib_Create64_make_h64_5(m1,
+    (uint64_t )0x9ce5a30a2c131b,
+    (uint64_t )0x215d086329a7ed,
+    (uint64_t )0xffffffffeb2106,
+    (uint64_t )0xffffffffffffff,
+    (uint64_t )0x00000fffffffff);
+}
+
+/* Constant-time 5-limb select: z = (b == 1) ? x : y.  mask = b - 1 is
+ * all-zeros when b == 1 and all-ones when b == 0; each limb is
+ * ((y ^ x) & mask) ^ x (`&` binds tighter than `^`).  Branch-free on
+ * purpose — do not rewrite with a conditional. */
+static void Hacl_Impl_BignumQ_Mul_choose(uint64_t *z, uint64_t *x, uint64_t *y, uint64_t b)
+{
+  uint64_t mask = b - (uint64_t )1;
+  uint64_t x0 = x[0];
+  uint64_t x1 = x[1];
+  uint64_t x2 = x[2];
+  uint64_t x3 = x[3];
+  uint64_t x4 = x[4];
+  uint64_t y0 = y[0];
+  uint64_t y1 = y[1];
+  uint64_t y2 = y[2];
+  uint64_t y3 = y[3];
+  uint64_t y4 = y[4];
+  uint64_t z0 = (y0 ^ x0) & mask ^ x0;
+  uint64_t z1 = (y1 ^ x1) & mask ^ x1;
+  uint64_t z2 = (y2 ^ x2) & mask ^ x2;
+  uint64_t z3 = (y3 ^ x3) & mask ^ x3;
+  uint64_t z4 = (y4 ^ x4) & mask ^ x4;
+  Hacl_Lib_Create64_make_h64_5(z, z0, z1, z2, z3, z4);
+}
+
+/* Branch-free borrow: returns 1 when a < b, else 0.  Precedence note:
+ * `-` binds tighter than `>>`, so this is (a - b) >> 63 — the sign bit
+ * of the wrapped difference. */
+static uint64_t Hacl_Impl_BignumQ_Mul_lt(uint64_t a, uint64_t b)
+{
+  return a - b >> (uint32_t )63;
+}
+
+/* b << 56: turns a 0/1 borrow into the value 2^56, the limb radix. */
+static uint64_t Hacl_Impl_BignumQ_Mul_shiftl_56(uint64_t b)
+{
+  return b << (uint32_t )56;
+}
+
+/* z = x - y mod 2^264 over 5 x 56-bit limbs: a schoolbook borrow chain
+ * (lt yields each borrow, shiftl_56 re-adds the radix), with the top
+ * limb wrapped at 40 bits (4 * 56 + 40 = 264) via the b4 << 40 term. */
+static void Hacl_Impl_BignumQ_Mul_sub_mod_264(uint64_t *z, uint64_t *x, uint64_t *y)
+{
+  uint64_t x0 = x[0];
+  uint64_t x1 = x[1];
+  uint64_t x2 = x[2];
+  uint64_t x3 = x[3];
+  uint64_t x4 = x[4];
+  uint64_t y0 = y[0];
+  uint64_t y1 = y[1];
+  uint64_t y2 = y[2];
+  uint64_t y3 = y[3];
+  uint64_t y4 = y[4];
+  uint64_t b = Hacl_Impl_BignumQ_Mul_lt(x0, y0);
+  uint64_t t0 = Hacl_Impl_BignumQ_Mul_shiftl_56(b) + x0 - y0;
+  uint64_t y11 = y1 + b;
+  uint64_t b1 = Hacl_Impl_BignumQ_Mul_lt(x1, y11);
+  uint64_t t1 = Hacl_Impl_BignumQ_Mul_shiftl_56(b1) + x1 - y11;
+  uint64_t y21 = y2 + b1;
+  uint64_t b2 = Hacl_Impl_BignumQ_Mul_lt(x2, y21);
+  uint64_t t2 = Hacl_Impl_BignumQ_Mul_shiftl_56(b2) + x2 - y21;
+  uint64_t y31 = y3 + b2;
+  uint64_t b3 = Hacl_Impl_BignumQ_Mul_lt(x3, y31);
+  uint64_t t3 = Hacl_Impl_BignumQ_Mul_shiftl_56(b3) + x3 - y31;
+  uint64_t y41 = y4 + b3;
+  uint64_t b4 = Hacl_Impl_BignumQ_Mul_lt(x4, y41);
+  uint64_t t4 = (b4 << (uint32_t )40) + x4 - y41;
+  Hacl_Lib_Create64_make_h64_5(z, t0, t1, t2, t3, t4);
+}
+
+/* Conditional final subtraction: z = x - L when x >= L, else z = x.
+ * Computes the full borrow chain for x - L (y0..y4 are L's limbs), then
+ * the final borrow b4 (1 iff x < L) drives a constant-time select
+ * between the saved copy of x (tmp) and the difference. */
+static void Hacl_Impl_BignumQ_Mul_subm_conditional(uint64_t *z, uint64_t *x)
+{
+  uint64_t tmp[5] = { 0 };
+  uint64_t x0 = x[0];
+  uint64_t x1 = x[1];
+  uint64_t x2 = x[2];
+  uint64_t x3 = x[3];
+  uint64_t x4 = x[4];
+  Hacl_Lib_Create64_make_h64_5(tmp, x0, x1, x2, x3, x4);
+  uint64_t y0 = (uint64_t )0x12631a5cf5d3ed;
+  uint64_t y1 = (uint64_t )0xf9dea2f79cd658;
+  uint64_t y2 = (uint64_t )0x000000000014de;
+  uint64_t y3 = (uint64_t )0x00000000000000;
+  uint64_t y4 = (uint64_t )0x00000010000000;
+  uint64_t b = Hacl_Impl_BignumQ_Mul_lt(x0, y0);
+  uint64_t t0 = Hacl_Impl_BignumQ_Mul_shiftl_56(b) + x0 - y0;
+  uint64_t y11 = y1 + b;
+  uint64_t b1 = Hacl_Impl_BignumQ_Mul_lt(x1, y11);
+  uint64_t t1 = Hacl_Impl_BignumQ_Mul_shiftl_56(b1) + x1 - y11;
+  uint64_t y21 = y2 + b1;
+  uint64_t b2 = Hacl_Impl_BignumQ_Mul_lt(x2, y21);
+  uint64_t t2 = Hacl_Impl_BignumQ_Mul_shiftl_56(b2) + x2 - y21;
+  uint64_t y31 = y3 + b2;
+  uint64_t b3 = Hacl_Impl_BignumQ_Mul_lt(x3, y31);
+  uint64_t t3 = Hacl_Impl_BignumQ_Mul_shiftl_56(b3) + x3 - y31;
+  uint64_t y41 = y4 + b3;
+  uint64_t b4 = Hacl_Impl_BignumQ_Mul_lt(x4, y41);
+  uint64_t t4 = Hacl_Impl_BignumQ_Mul_shiftl_56(b4) + x4 - y41;
+  Hacl_Lib_Create64_make_h64_5(z, t0, t1, t2, t3, t4);
+  Hacl_Impl_BignumQ_Mul_choose(z, tmp, z, b4);
+}
+
+/* z = (x * y) mod 2^264 over 5 x 56-bit limbs: schoolbook products for
+ * columns 0..4 only (higher columns cannot affect the low 264 bits),
+ * with carries propagated through 128-bit accumulators; the top limb is
+ * masked to 40 bits. */
+static void Hacl_Impl_BignumQ_Mul_low_mul_5(uint64_t *z, uint64_t *x, uint64_t *y)
+{
+  uint64_t x0 = x[0];
+  uint64_t x1 = x[1];
+  uint64_t x2 = x[2];
+  uint64_t x3 = x[3];
+  uint64_t x4 = x[4];
+  uint64_t y0 = y[0];
+  uint64_t y1 = y[1];
+  uint64_t y2 = y[2];
+  uint64_t y3 = y[3];
+  uint64_t y4 = y[4];
+  FStar_UInt128_t xy00 = FStar_UInt128_mul_wide(x0, y0);
+  FStar_UInt128_t xy01 = FStar_UInt128_mul_wide(x0, y1);
+  FStar_UInt128_t xy02 = FStar_UInt128_mul_wide(x0, y2);
+  FStar_UInt128_t xy03 = FStar_UInt128_mul_wide(x0, y3);
+  FStar_UInt128_t xy04 = FStar_UInt128_mul_wide(x0, y4);
+  FStar_UInt128_t xy10 = FStar_UInt128_mul_wide(x1, y0);
+  FStar_UInt128_t xy11 = FStar_UInt128_mul_wide(x1, y1);
+  FStar_UInt128_t xy12 = FStar_UInt128_mul_wide(x1, y2);
+  FStar_UInt128_t xy13 = FStar_UInt128_mul_wide(x1, y3);
+  FStar_UInt128_t xy20 = FStar_UInt128_mul_wide(x2, y0);
+  FStar_UInt128_t xy21 = FStar_UInt128_mul_wide(x2, y1);
+  FStar_UInt128_t xy22 = FStar_UInt128_mul_wide(x2, y2);
+  FStar_UInt128_t xy30 = FStar_UInt128_mul_wide(x3, y0);
+  FStar_UInt128_t xy31 = FStar_UInt128_mul_wide(x3, y1);
+  FStar_UInt128_t xy40 = FStar_UInt128_mul_wide(x4, y0);
+  FStar_UInt128_t x5 = xy00;
+  FStar_UInt128_t carry1 = FStar_UInt128_shift_right(x5, (uint32_t )56);
+  uint64_t t = FStar_Int_Cast_Full_uint128_to_uint64(x5) & (uint64_t )0xffffffffffffff;
+  uint64_t t0 = t;
+  FStar_UInt128_t x6 = FStar_UInt128_add(FStar_UInt128_add(xy01, xy10), carry1);
+  FStar_UInt128_t carry2 = FStar_UInt128_shift_right(x6, (uint32_t )56);
+  uint64_t t1 = FStar_Int_Cast_Full_uint128_to_uint64(x6) & (uint64_t )0xffffffffffffff;
+  uint64_t t11 = t1;
+  FStar_UInt128_t
+  x7 = FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_add(xy02, xy11), xy20), carry2);
+  FStar_UInt128_t carry3 = FStar_UInt128_shift_right(x7, (uint32_t )56);
+  uint64_t t2 = FStar_Int_Cast_Full_uint128_to_uint64(x7) & (uint64_t )0xffffffffffffff;
+  uint64_t t21 = t2;
+  FStar_UInt128_t
+  x8 =
+    FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_add(xy03, xy12), xy21),
+        xy30),
+      carry3);
+  FStar_UInt128_t carry4 = FStar_UInt128_shift_right(x8, (uint32_t )56);
+  uint64_t t3 = FStar_Int_Cast_Full_uint128_to_uint64(x8) & (uint64_t )0xffffffffffffff;
+  uint64_t t31 = t3;
+  uint64_t
+  t4 =
+    FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_add(xy04,
+                  xy13),
+                xy22),
+              xy31),
+            xy40),
+          carry4))
+    & (uint64_t )0xffffffffff;
+  Hacl_Lib_Create64_make_h64_5(z, t0, t11, t21, t31, t4);
+}
+
+/* Full 5x5-limb schoolbook product: z[i] = sum of x_j * y_k with
+ * j + k == i, as 9 un-carried 128-bit column sums (carry propagation
+ * happens later in Hacl_Impl_BignumQ_Mul_carry). */
+static void Hacl_Impl_BignumQ_Mul_mul_5(FStar_UInt128_t *z, uint64_t *x, uint64_t *y)
+{
+  uint64_t x0 = x[0];
+  uint64_t x1 = x[1];
+  uint64_t x2 = x[2];
+  uint64_t x3 = x[3];
+  uint64_t x4 = x[4];
+  uint64_t y0 = y[0];
+  uint64_t y1 = y[1];
+  uint64_t y2 = y[2];
+  uint64_t y3 = y[3];
+  uint64_t y4 = y[4];
+  FStar_UInt128_t xy00 = FStar_UInt128_mul_wide(x0, y0);
+  FStar_UInt128_t xy01 = FStar_UInt128_mul_wide(x0, y1);
+  FStar_UInt128_t xy02 = FStar_UInt128_mul_wide(x0, y2);
+  FStar_UInt128_t xy03 = FStar_UInt128_mul_wide(x0, y3);
+  FStar_UInt128_t xy04 = FStar_UInt128_mul_wide(x0, y4);
+  FStar_UInt128_t xy10 = FStar_UInt128_mul_wide(x1, y0);
+  FStar_UInt128_t xy11 = FStar_UInt128_mul_wide(x1, y1);
+  FStar_UInt128_t xy12 = FStar_UInt128_mul_wide(x1, y2);
+  FStar_UInt128_t xy13 = FStar_UInt128_mul_wide(x1, y3);
+  FStar_UInt128_t xy14 = FStar_UInt128_mul_wide(x1, y4);
+  FStar_UInt128_t xy20 = FStar_UInt128_mul_wide(x2, y0);
+  FStar_UInt128_t xy21 = FStar_UInt128_mul_wide(x2, y1);
+  FStar_UInt128_t xy22 = FStar_UInt128_mul_wide(x2, y2);
+  FStar_UInt128_t xy23 = FStar_UInt128_mul_wide(x2, y3);
+  FStar_UInt128_t xy24 = FStar_UInt128_mul_wide(x2, y4);
+  FStar_UInt128_t xy30 = FStar_UInt128_mul_wide(x3, y0);
+  FStar_UInt128_t xy31 = FStar_UInt128_mul_wide(x3, y1);
+  FStar_UInt128_t xy32 = FStar_UInt128_mul_wide(x3, y2);
+  FStar_UInt128_t xy33 = FStar_UInt128_mul_wide(x3, y3);
+  FStar_UInt128_t xy34 = FStar_UInt128_mul_wide(x3, y4);
+  FStar_UInt128_t xy40 = FStar_UInt128_mul_wide(x4, y0);
+  FStar_UInt128_t xy41 = FStar_UInt128_mul_wide(x4, y1);
+  FStar_UInt128_t xy42 = FStar_UInt128_mul_wide(x4, y2);
+  FStar_UInt128_t xy43 = FStar_UInt128_mul_wide(x4, y3);
+  FStar_UInt128_t xy44 = FStar_UInt128_mul_wide(x4, y4);
+  FStar_UInt128_t z0 = xy00;
+  FStar_UInt128_t z1 = FStar_UInt128_add(xy01, xy10);
+  FStar_UInt128_t z2 = FStar_UInt128_add(FStar_UInt128_add(xy02, xy11), xy20);
+  FStar_UInt128_t
+  z3 = FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_add(xy03, xy12), xy21), xy30);
+  FStar_UInt128_t
+  z4 =
+    FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_add(xy04, xy13), xy22),
+        xy31),
+      xy40);
+  FStar_UInt128_t
+  z5 = FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_add(xy14, xy23), xy32), xy41);
+  FStar_UInt128_t z6 = FStar_UInt128_add(FStar_UInt128_add(xy24, xy33), xy42);
+  FStar_UInt128_t z7 = FStar_UInt128_add(xy34, xy43);
+  FStar_UInt128_t z8 = xy44;
+  Hacl_Lib_Create128_make_h128_9(z, z0, z1, z2, z3, z4, z5, z6, z7, z8);
+}
+
+/* Carry-propagates the 9 un-carried 128-bit column sums from mul_5 into
+ * 10 normalized 56-bit limbs: each step keeps the low 56 bits and adds
+ * the shifted-out carry into the next column; the last carry becomes the
+ * tenth limb. */
+static void Hacl_Impl_BignumQ_Mul_carry(uint64_t *out, FStar_UInt128_t *z)
+{
+  FStar_UInt128_t z0 = z[0];
+  FStar_UInt128_t z1 = z[1];
+  FStar_UInt128_t z2 = z[2];
+  FStar_UInt128_t z3 = z[3];
+  FStar_UInt128_t z4 = z[4];
+  FStar_UInt128_t z5 = z[5];
+  FStar_UInt128_t z6 = z[6];
+  FStar_UInt128_t z7 = z[7];
+  FStar_UInt128_t z8 = z[8];
+  FStar_UInt128_t x = z0;
+  FStar_UInt128_t y = z1;
+  FStar_UInt128_t carry1 = FStar_UInt128_shift_right(x, (uint32_t )56);
+  uint64_t t = FStar_Int_Cast_Full_uint128_to_uint64(x) & (uint64_t )0xffffffffffffff;
+  uint64_t x0 = t;
+  FStar_UInt128_t z1_ = FStar_UInt128_add(y, carry1);
+  FStar_UInt128_t x1 = z1_;
+  FStar_UInt128_t y1 = z2;
+  FStar_UInt128_t carry2 = FStar_UInt128_shift_right(x1, (uint32_t )56);
+  uint64_t t1 = FStar_Int_Cast_Full_uint128_to_uint64(x1) & (uint64_t )0xffffffffffffff;
+  uint64_t x11 = t1;
+  FStar_UInt128_t z2_ = FStar_UInt128_add(y1, carry2);
+  FStar_UInt128_t x2 = z2_;
+  FStar_UInt128_t y2 = z3;
+  FStar_UInt128_t carry3 = FStar_UInt128_shift_right(x2, (uint32_t )56);
+  uint64_t t2 = FStar_Int_Cast_Full_uint128_to_uint64(x2) & (uint64_t )0xffffffffffffff;
+  uint64_t x21 = t2;
+  FStar_UInt128_t z3_ = FStar_UInt128_add(y2, carry3);
+  FStar_UInt128_t x3 = z3_;
+  FStar_UInt128_t y3 = z4;
+  FStar_UInt128_t carry4 = FStar_UInt128_shift_right(x3, (uint32_t )56);
+  uint64_t t3 = FStar_Int_Cast_Full_uint128_to_uint64(x3) & (uint64_t )0xffffffffffffff;
+  uint64_t x31 = t3;
+  FStar_UInt128_t z4_ = FStar_UInt128_add(y3, carry4);
+  FStar_UInt128_t x4 = z4_;
+  FStar_UInt128_t y4 = z5;
+  FStar_UInt128_t carry5 = FStar_UInt128_shift_right(x4, (uint32_t )56);
+  uint64_t t4 = FStar_Int_Cast_Full_uint128_to_uint64(x4) & (uint64_t )0xffffffffffffff;
+  uint64_t x41 = t4;
+  FStar_UInt128_t z5_ = FStar_UInt128_add(y4, carry5);
+  FStar_UInt128_t x5 = z5_;
+  FStar_UInt128_t y5 = z6;
+  FStar_UInt128_t carry6 = FStar_UInt128_shift_right(x5, (uint32_t )56);
+  uint64_t t5 = FStar_Int_Cast_Full_uint128_to_uint64(x5) & (uint64_t )0xffffffffffffff;
+  uint64_t x51 = t5;
+  FStar_UInt128_t z6_ = FStar_UInt128_add(y5, carry6);
+  FStar_UInt128_t x6 = z6_;
+  FStar_UInt128_t y6 = z7;
+  FStar_UInt128_t carry7 = FStar_UInt128_shift_right(x6, (uint32_t )56);
+  uint64_t t6 = FStar_Int_Cast_Full_uint128_to_uint64(x6) & (uint64_t )0xffffffffffffff;
+  uint64_t x61 = t6;
+  FStar_UInt128_t z7_ = FStar_UInt128_add(y6, carry7);
+  FStar_UInt128_t x7 = z7_;
+  FStar_UInt128_t y7 = z8;
+  FStar_UInt128_t carry8 = FStar_UInt128_shift_right(x7, (uint32_t )56);
+  uint64_t t7 = FStar_Int_Cast_Full_uint128_to_uint64(x7) & (uint64_t )0xffffffffffffff;
+  uint64_t x71 = t7;
+  FStar_UInt128_t z8_ = FStar_UInt128_add(y7, carry8);
+  FStar_UInt128_t x8 = z8_;
+  FStar_UInt128_t y8 = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
+  FStar_UInt128_t carry9 = FStar_UInt128_shift_right(x8, (uint32_t )56);
+  uint64_t t8 = FStar_Int_Cast_Full_uint128_to_uint64(x8) & (uint64_t )0xffffffffffffff;
+  uint64_t x81 = t8;
+  FStar_UInt128_t z9_ = FStar_UInt128_add(y8, carry9);
+  uint64_t x9 = FStar_Int_Cast_Full_uint128_to_uint64(z9_);
+  Hacl_Lib_Create64_make_h64_10(out, x0, x11, x21, x31, x41, x51, x61, x71, x81, x9);
+}
+
+/* r = t mod 2^264: keeps the 4 low 56-bit limbs and 40 bits of limb 4
+ * (4 * 56 + 40 = 264). */
+static void Hacl_Impl_BignumQ_Mul_mod_264(uint64_t *r, uint64_t *t)
+{
+  uint64_t x0 = t[0];
+  uint64_t x1 = t[1];
+  uint64_t x2 = t[2];
+  uint64_t x3 = t[3];
+  uint64_t x4 = t[4];
+  uint64_t x4_ = x4 & (uint64_t )0xffffffffff;
+  Hacl_Lib_Create64_make_h64_5(r, x0, x1, x2, x3, x4_);
+}
+
+/* out = t / 2^248 for a 10-limb (56-bit) input: drops the 4 low limbs
+ * (224 bits) and shifts the rest right by 24 — each output limb combines
+ * the top 32 bits of x_i with the low 24 bits of x_{i+1}. */
+static void Hacl_Impl_BignumQ_Mul_div_248(uint64_t *out, uint64_t *t)
+{
+  (void )t[0];
+  (void )t[1];
+  (void )t[2];
+  (void )t[3];
+  uint64_t x4 = t[4];
+  uint64_t x5 = t[5];
+  uint64_t x6 = t[6];
+  uint64_t x7 = t[7];
+  uint64_t x8 = t[8];
+  uint64_t x9 = t[9];
+  uint64_t z0 = (x5 & (uint64_t )0xffffff) << (uint32_t )32 | x4 >> (uint32_t )24;
+  uint64_t z1 = (x6 & (uint64_t )0xffffff) << (uint32_t )32 | x5 >> (uint32_t )24;
+  uint64_t z2 = (x7 & (uint64_t )0xffffff) << (uint32_t )32 | x6 >> (uint32_t )24;
+  uint64_t z3 = (x8 & (uint64_t )0xffffff) << (uint32_t )32 | x7 >> (uint32_t )24;
+  uint64_t z4 = (x9 & (uint64_t )0xffffff) << (uint32_t )32 | x8 >> (uint32_t )24;
+  Hacl_Lib_Create64_make_h64_5(out, z0, z1, z2, z3, z4);
+}
+
+/* out = t / 2^264 for a 10-limb (56-bit) input: drops the 4 low limbs
+ * and shifts right by 40 more (224 + 40 = 264) — each output limb is the
+ * top 16 bits of x_i plus the low 40 bits of x_{i+1}. */
+static void Hacl_Impl_BignumQ_Mul_div_264(uint64_t *out, uint64_t *t)
+{
+  (void )t[0];
+  (void )t[1];
+  (void )t[2];
+  (void )t[3];
+  uint64_t x4 = t[4];
+  uint64_t x5 = t[5];
+  uint64_t x6 = t[6];
+  uint64_t x7 = t[7];
+  uint64_t x8 = t[8];
+  uint64_t x9 = t[9];
+  uint64_t z0 = (x5 & (uint64_t )0xffffffffff) << (uint32_t )16 | x4 >> (uint32_t )40;
+  uint64_t z1 = (x6 & (uint64_t )0xffffffffff) << (uint32_t )16 | x5 >> (uint32_t )40;
+  uint64_t z2 = (x7 & (uint64_t )0xffffffffff) << (uint32_t )16 | x6 >> (uint32_t )40;
+  uint64_t z3 = (x8 & (uint64_t )0xffffffffff) << (uint32_t )16 | x7 >> (uint32_t )40;
+  uint64_t z4 = (x9 & (uint64_t )0xffffffffff) << (uint32_t )16 | x8 >> (uint32_t )40;
+  Hacl_Lib_Create64_make_h64_5(out, z0, z1, z2, z3, z4);
+}
+
+/* Barrett step 1: qmu_264 (tmp[20..24]) = floor(floor(t / 2^248) * mu /
+ * 2^264), the quotient estimate.  tmp layout: [q1 | . | qmu_ (10 limbs)
+ * | qmu_264]; qmu holds the 9 wide column sums of q1 * mu. */
+static void
+Hacl_Impl_BignumQ_Mul_barrett_reduction__1(
+  FStar_UInt128_t *qmu,
+  uint64_t *t,
+  uint64_t *mu1,
+  uint64_t *tmp
+)
+{
+  uint64_t *q1 = tmp;
+  uint64_t *qmu_ = tmp + (uint32_t )10;
+  uint64_t *qmu_264 = tmp + (uint32_t )20;
+  Hacl_Impl_BignumQ_Mul_div_248(q1, t);
+  Hacl_Impl_BignumQ_Mul_mul_5(qmu, q1, mu1);
+  Hacl_Impl_BignumQ_Mul_carry(qmu_, qmu);
+  Hacl_Impl_BignumQ_Mul_div_264(qmu_264, qmu_);
+}
+
+/* Barrett step 2: s (tmp[25..29]) = (t mod 2^264) - (qmu_264 * L mod
+ * 2^264), all mod 2^264.  Reuses the tmp layout written by step 1
+ * (qmu_264 at tmp[20..24]); qmul/r are fresh scratch slots. */
+static void
+Hacl_Impl_BignumQ_Mul_barrett_reduction__2(uint64_t *t, uint64_t *m1, uint64_t *tmp)
+{
+  uint64_t *qmul = tmp;
+  uint64_t *r = tmp + (uint32_t )5;
+  uint64_t *qmu_264 = tmp + (uint32_t )20;
+  uint64_t *s = tmp + (uint32_t )25;
+  Hacl_Impl_BignumQ_Mul_mod_264(r, t);
+  Hacl_Impl_BignumQ_Mul_low_mul_5(qmul, qmu_264, m1);
+  Hacl_Impl_BignumQ_Mul_sub_mod_264(s, r, qmul);
+}
+
+/* Barrett reduction core: z = t mod L.  Runs steps 1 and 2 into the
+ * shared 30-limb tmp, then one conditional subtraction of L on the
+ * remainder s (tmp[25..29]) finishes the reduction.  tmp must hold at
+ * least 30 limbs. */
+static void
+Hacl_Impl_BignumQ_Mul_barrett_reduction__(
+  uint64_t *z,
+  uint64_t *t,
+  uint64_t *m1,
+  uint64_t *mu1,
+  uint64_t *tmp
+)
+{
+  uint64_t *s = tmp + (uint32_t )25;
+  KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0), (uint32_t )9);
+  FStar_UInt128_t qmu[9];
+  for (uintmax_t _i = 0; _i < (uint32_t )9; ++_i)
+    qmu[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
+  Hacl_Impl_BignumQ_Mul_barrett_reduction__1(qmu, t, mu1, tmp);
+  Hacl_Impl_BignumQ_Mul_barrett_reduction__2(t, m1, tmp);
+  Hacl_Impl_BignumQ_Mul_subm_conditional(z, s);
+}
+
+/* z = t mod L with stack scratch: loads L and mu into the first two
+ * 5-limb slots of a 40-limb buffer and hands the remaining 30 limbs to
+ * the reduction core. */
+static void Hacl_Impl_BignumQ_Mul_barrett_reduction_(uint64_t *z, uint64_t *t)
+{
+  uint64_t tmp[40] = { 0 };
+  uint64_t *m1 = tmp;
+  uint64_t *mu1 = tmp + (uint32_t )5;
+  uint64_t *tmp1 = tmp + (uint32_t )10;
+  Hacl_Impl_BignumQ_Mul_make_m(m1);
+  Hacl_Impl_BignumQ_Mul_make_mu(mu1);
+  Hacl_Impl_BignumQ_Mul_barrett_reduction__(z, t, m1, mu1, tmp1);
+}
+
+/* Public entry: z = t mod L (t is 10 x 56-bit limbs, z is 5 limbs). */
+static void Hacl_Impl_BignumQ_Mul_barrett_reduction(uint64_t *z, uint64_t *t)
+{
+  Hacl_Impl_BignumQ_Mul_barrett_reduction_(z, t);
+}
+
+/* out = x * y mod L: full 5x5 multiply into wide columns, carry into a
+ * 10-limb product, then Barrett-reduce. */
+static void Hacl_Impl_BignumQ_Mul_mul_modq(uint64_t *out, uint64_t *x, uint64_t *y)
+{
+  uint64_t z_[10] = { 0 };
+  KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0), (uint32_t )9);
+  FStar_UInt128_t z[9];
+  for (uintmax_t _i = 0; _i < (uint32_t )9; ++_i)
+    z[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
+  Hacl_Impl_BignumQ_Mul_mul_5(z, x, y);
+  Hacl_Impl_BignumQ_Mul_carry(z_, z);
+  Hacl_Impl_BignumQ_Mul_barrett_reduction_(out, z_);
+}
+
+/* out = x + y mod L: limb-wise add, carry chain normalizing each limb to
+ * 56 bits, then one conditional subtraction of L via subm_conditional. */
+static void Hacl_Impl_BignumQ_Mul_add_modq_(uint64_t *out, uint64_t *x, uint64_t *y)
+{
+  uint64_t tmp[5] = { 0 };
+  uint64_t x0 = x[0];
+  uint64_t x1 = x[1];
+  uint64_t x2 = x[2];
+  uint64_t x3 = x[3];
+  uint64_t x4 = x[4];
+  uint64_t y0 = y[0];
+  uint64_t y1 = y[1];
+  uint64_t y2 = y[2];
+  uint64_t y3 = y[3];
+  uint64_t y4 = y[4];
+  uint64_t z0 = x0 + y0;
+  uint64_t z1 = x1 + y1;
+  uint64_t z2 = x2 + y2;
+  uint64_t z3 = x3 + y3;
+  uint64_t z4 = x4 + y4;
+  uint64_t x5 = z0;
+  uint64_t y5 = z1;
+  uint64_t carry1 = x5 >> (uint32_t )56;
+  uint64_t t = x5 & (uint64_t )0xffffffffffffff;
+  uint64_t x01 = t;
+  uint64_t z1_ = y5 + carry1;
+  uint64_t x6 = z1_;
+  uint64_t y6 = z2;
+  uint64_t carry2 = x6 >> (uint32_t )56;
+  uint64_t t1 = x6 & (uint64_t )0xffffffffffffff;
+  uint64_t x11 = t1;
+  uint64_t z2_ = y6 + carry2;
+  uint64_t x7 = z2_;
+  uint64_t y7 = z3;
+  uint64_t carry3 = x7 >> (uint32_t )56;
+  uint64_t t2 = x7 & (uint64_t )0xffffffffffffff;
+  uint64_t x21 = t2;
+  uint64_t z3_ = y7 + carry3;
+  uint64_t x8 = z3_;
+  uint64_t y8 = z4;
+  uint64_t carry4 = x8 >> (uint32_t )56;
+  uint64_t t3 = x8 & (uint64_t )0xffffffffffffff;
+  uint64_t x31 = t3;
+  uint64_t x41 = y8 + carry4;
+  Hacl_Lib_Create64_make_h64_5(tmp, x01, x11, x21, x31, x41);
+  Hacl_Impl_BignumQ_Mul_subm_conditional(out, tmp);
+}
+
+/* Thin forwarder kept by the KreMLin extraction; see add_modq_. */
+static void Hacl_Impl_BignumQ_Mul_add_modq(uint64_t *out, uint64_t *x, uint64_t *y)
+{
+  Hacl_Impl_BignumQ_Mul_add_modq_(out, x, y);
+}
+
+/* out <- SHA-512(prefix || input) interpreted as an integer, reduced mod q.
+ * tmp receives the 10-limb unpacked hash before reduction. */
+static void
+Hacl_Impl_SHA512_ModQ_sha512_modq_pre_(
+  uint64_t *out,
+  uint8_t *prefix,
+  uint8_t *input,
+  uint32_t len1,
+  uint64_t *tmp
+)
+{
+  uint8_t hash1[64] = { 0 };
+  Hacl_Impl_Sha512_sha512_pre_msg(hash1, prefix, input, len1);
+  Hacl_Impl_Load56_load_64_bytes(tmp, hash1); /* 64 bytes -> 10 x 56-bit limbs */
+  Hacl_Impl_BignumQ_Mul_barrett_reduction(out, tmp);
+}
+
+/* Stack-allocating wrapper around sha512_modq_pre_ (provides the scratch). */
+static void
+Hacl_Impl_SHA512_ModQ_sha512_modq_pre(
+  uint64_t *out,
+  uint8_t *prefix,
+  uint8_t *input,
+  uint32_t len1
+)
+{
+  uint64_t tmp[10] = { 0 };
+  Hacl_Impl_SHA512_ModQ_sha512_modq_pre_(out, prefix, input, len1, tmp);
+}
+
+/* out <- SHA-512(prefix || prefix2 || input) mod q.
+ * Two-prefix variant used for the h = H(R || A || msg) computation. */
+static void
+Hacl_Impl_SHA512_ModQ_sha512_modq_pre_pre2_(
+  uint64_t *out,
+  uint8_t *prefix,
+  uint8_t *prefix2,
+  uint8_t *input,
+  uint32_t len1,
+  uint64_t *tmp
+)
+{
+  uint8_t hash1[64] = { 0 };
+  Hacl_Impl_Sha512_sha512_pre_pre2_msg(hash1, prefix, prefix2, input, len1);
+  Hacl_Impl_Load56_load_64_bytes(tmp, hash1);
+  Hacl_Impl_BignumQ_Mul_barrett_reduction(out, tmp);
+}
+
+/* Stack-allocating wrapper around sha512_modq_pre_pre2_. */
+static void
+Hacl_Impl_SHA512_ModQ_sha512_modq_pre_pre2(
+  uint64_t *out,
+  uint8_t *prefix,
+  uint8_t *prefix2,
+  uint8_t *input,
+  uint32_t len1
+)
+{
+  uint64_t tmp[10] = { 0 };
+  Hacl_Impl_SHA512_ModQ_sha512_modq_pre_pre2_(out, prefix, prefix2, input, len1, tmp);
+}
+
+/* Verification step 1: decompress the point R from the first 32 bytes of
+ * the signature into r_.  Returns false if decompression fails
+ * (i.e. the encoding is not a valid curve point). */
+static bool Hacl_Impl_Ed25519_Verify_Steps_verify_step_1(uint64_t *r_, uint8_t *signature)
+{
+  uint8_t *rs = signature;
+  bool b_ = Hacl_Impl_Ed25519_PointDecompress_point_decompress(r_, rs);
+  return b_;
+}
+
+/* Verification step 2: r <- SHA-512(R || public || msg) mod q,
+ * serialized back to bytes via store_56. */
+static void
+Hacl_Impl_Ed25519_Verify_Steps_verify_step_2(
+  uint8_t *r,
+  uint8_t *msg,
+  uint32_t len1,
+  uint8_t *rs,
+  uint8_t *public
+)
+{
+  uint64_t r_[5] = { 0 };
+  Hacl_Impl_SHA512_ModQ_sha512_modq_pre_pre2(r_, rs, public, msg, len1);
+  Hacl_Impl_Store56_store_56(r, r_);
+}
+
+/* result <- [scalar]B, scalar multiplication of the Ed25519 base point
+ * (g1 holds the base point in extended coordinates, 4 x 5 limbs). */
+static void Hacl_Impl_Ed25519_Verify_Steps_point_mul_g(uint64_t *result, uint8_t *scalar)
+{
+  uint64_t g1[20] = { 0 };
+  Hacl_Impl_Ed25519_G_make_g(g1);
+  Hacl_Impl_Ed25519_Ladder_point_mul(result, scalar, g1);
+}
+
+/* Verification step 4: check the group equation [S]B == R + [h]A.
+ * s  = scalar S (bytes), h_ = challenge hash h (bytes),
+ * a_ = decompressed public key A, r_ = decompressed R.
+ * tmp layout: hA (20 limbs), rhA = R + hA (20), sB = [S]B (20). */
+static bool
+Hacl_Impl_Ed25519_Verify_Steps_verify_step_4(
+  uint8_t *s,
+  uint8_t *h_,
+  uint64_t *a_,
+  uint64_t *r_
+)
+{
+  uint64_t tmp[60] = { 0 };
+  uint64_t *hA = tmp;
+  uint64_t *rhA = tmp + (uint32_t )20;
+  uint64_t *sB = tmp + (uint32_t )40;
+  Hacl_Impl_Ed25519_Verify_Steps_point_mul_g(sB, s);
+  Hacl_Impl_Ed25519_Ladder_point_mul(hA, h_, a_);
+  Hacl_Impl_Ed25519_PointAdd_point_add(rhA, r_, hA);
+  bool b = Hacl_Impl_Ed25519_PointEqual_point_equal(sB, rhA);
+  return b;
+}
+
+/* Core Ed25519 verification over caller-provided scratch.
+ * tmp (45 limbs): a_ = decompressed public key A (20),
+ * r_ = decompressed R (20), s = scalar S (5).  tmp_ (32 bytes) holds the
+ * challenge hash h.  Rejects if: A fails to decompress, R fails to
+ * decompress, or S >= q (non-canonical scalar); otherwise checks the
+ * group equation via verify_step_4. */
+static bool
+Hacl_Impl_Ed25519_Verify_verify__(
+  uint8_t *public,
+  uint8_t *msg,
+  uint32_t len1,
+  uint8_t *signature,
+  uint64_t *tmp,
+  uint8_t *tmp_
+)
+{
+  uint64_t *a_ = tmp;
+  uint64_t *r_ = tmp + (uint32_t )20;
+  uint64_t *s = tmp + (uint32_t )40;
+  uint8_t *h_ = tmp_;
+  bool b = Hacl_Impl_Ed25519_PointDecompress_point_decompress(a_, public);
+  bool res;
+  if (b)
+  {
+    uint8_t *rs = signature;
+    bool b_ = Hacl_Impl_Ed25519_Verify_Steps_verify_step_1(r_, signature);
+    bool ite0;
+    if (b_)
+    {
+      /* S is the second half of the 64-byte signature */
+      Hacl_Impl_Load56_load_32_bytes(s, signature + (uint32_t )32);
+      bool b__ = Hacl_Impl_Ed25519_PointEqual_gte_q(s);
+      bool ite;
+      if (b__)
+        ite = false; /* S >= q: reject malleable/non-canonical signatures */
+      else
+      {
+        Hacl_Impl_Ed25519_Verify_Steps_verify_step_2(h_, msg, len1, rs, public);
+        bool
+        b1 = Hacl_Impl_Ed25519_Verify_Steps_verify_step_4(signature + (uint32_t )32, h_, a_, r_);
+        ite = b1;
+      }
+      ite0 = ite;
+    }
+    else
+      ite0 = false;
+    res = ite0;
+  }
+  else
+    res = false;
+  return res;
+}
+
+/* Stack-allocating wrapper: provides the limb and byte scratch for verify__. */
+static bool
+Hacl_Impl_Ed25519_Verify_verify_(
+  uint8_t *public,
+  uint8_t *msg,
+  uint32_t len1,
+  uint8_t *signature
+)
+{
+  uint64_t tmp[45] = { 0 };
+  uint8_t tmp_[32] = { 0 };
+  bool res = Hacl_Impl_Ed25519_Verify_verify__(public, msg, len1, signature, tmp, tmp_);
+  return res;
+}
+
+/* Thin forwarder kept by the KreMLin extraction; see verify_. */
+static bool
+Hacl_Impl_Ed25519_Verify_verify(
+  uint8_t *public,
+  uint8_t *msg,
+  uint32_t len1,
+  uint8_t *signature
+)
+{
+  return Hacl_Impl_Ed25519_Verify_verify_(public, msg, len1, signature);
+}
+
+/* result <- [scalar]B (duplicate of the Verify_Steps helper; the generator
+ * emits one copy per namespace). */
+static void Hacl_Impl_Ed25519_Sign_Steps_point_mul_g(uint64_t *result, uint8_t *scalar)
+{
+  uint64_t g1[20] = { 0 };
+  Hacl_Impl_Ed25519_G_make_g(g1);
+  Hacl_Impl_Ed25519_Ladder_point_mul(result, scalar, g1);
+}
+
+/* out (32 bytes) <- point_compress([s]B). */
+static void Hacl_Impl_Ed25519_Sign_Steps_point_mul_g_compress(uint8_t *out, uint8_t *s)
+{
+  uint64_t tmp[20] = { 0 };
+  Hacl_Impl_Ed25519_Sign_Steps_point_mul_g(tmp, s);
+  Hacl_Impl_Ed25519_PointCompress_point_compress(out, tmp);
+}
+
+/* memcpy wrapper: copy len1 bytes from input to output (regions must not
+ * overlap, per memcpy semantics). */
+static void
+Hacl_Impl_Ed25519_Sign_Steps_copy_bytes(uint8_t *output, uint8_t *input, uint32_t len1)
+{
+  memcpy(output, input, len1 * sizeof input[0]);
+}
+
+/* Sign step 1: expand the secret key into tmp_bytes+224 (scalar a in the
+ * first 32 bytes, hash prefix in the next 32) and write the compressed
+ * public point [a]B to tmp_bytes+96.  tmp_ints is unused here; it is part
+ * of the common step signature.  The (void) cast is apparently a no-op
+ * remnant of ghost code from the verified source. */
+static void
+Hacl_Impl_Ed25519_Sign_Steps_sign_step_1(
+  uint8_t *secret,
+  uint8_t *tmp_bytes,
+  uint64_t *tmp_ints
+)
+{
+  uint8_t *a__ = tmp_bytes + (uint32_t )96;
+  uint8_t *apre = tmp_bytes + (uint32_t )224;
+  uint8_t *a = apre;
+  (void )(apre + (uint32_t )32);
+  Hacl_Impl_Ed25519_SecretExpand_secret_expand(apre, secret);
+  Hacl_Impl_Ed25519_Sign_Steps_point_mul_g_compress(a__, a);
+}
+
+/* Sign step 2: r <- SHA-512(prefix || msg) mod q, where prefix is the
+ * second half of the expanded secret (tmp_bytes+224+32).  The nonce r is
+ * stored at tmp_ints+20. */
+static void
+Hacl_Impl_Ed25519_Sign_Steps_sign_step_2(
+  uint8_t *msg,
+  uint32_t len1,
+  uint8_t *tmp_bytes,
+  uint64_t *tmp_ints
+)
+{
+  uint64_t *r = tmp_ints + (uint32_t )20;
+  (void )(tmp_bytes + (uint32_t )96);
+  uint8_t *apre = tmp_bytes + (uint32_t )224;
+  uint8_t *prefix = apre + (uint32_t )32;
+  Hacl_Impl_SHA512_ModQ_sha512_modq_pre(r, prefix, msg, len1);
+}
+
+/* Sign step 4: h <- SHA-512(R || A || msg) mod q, where R (compressed
+ * nonce point) is at tmp_bytes+160 and A (compressed public key) at
+ * tmp_bytes+96.  h is stored at tmp_ints+60. */
+static void
+Hacl_Impl_Ed25519_Sign_Steps_sign_step_4(
+  uint8_t *msg,
+  uint32_t len1,
+  uint8_t *tmp_bytes,
+  uint64_t *tmp_ints
+)
+{
+  (void )(tmp_ints + (uint32_t )20);
+  uint64_t *h = tmp_ints + (uint32_t )60;
+  uint8_t *a__ = tmp_bytes + (uint32_t )96;
+  (void )(tmp_bytes + (uint32_t )128);
+  uint8_t *rs_ = tmp_bytes + (uint32_t )160;
+  uint8_t *apre = tmp_bytes + (uint32_t )224;
+  (void )(apre + (uint32_t )32);
+  Hacl_Impl_SHA512_ModQ_sha512_modq_pre_pre2(h, rs_, a__, msg, len1);
+}
+
+/* Sign step 5: S <- (r + h * a) mod q, serialized to tmp_bytes+192.
+ * Reads the scalar a (bytes) from tmp_bytes+224, the nonce r from
+ * tmp_ints+20 and the challenge h from tmp_ints+60. */
+static void Hacl_Impl_Ed25519_Sign_Steps_sign_step_5(uint8_t *tmp_bytes, uint64_t *tmp_ints)
+{
+  uint64_t *r = tmp_ints + (uint32_t )20;
+  uint64_t *aq = tmp_ints + (uint32_t )45;
+  uint64_t *ha = tmp_ints + (uint32_t )50;
+  uint64_t *s = tmp_ints + (uint32_t )55;
+  uint64_t *h = tmp_ints + (uint32_t )60;
+  uint8_t *s_ = tmp_bytes + (uint32_t )192;
+  uint8_t *apre = tmp_bytes + (uint32_t )224;
+  (void )(tmp_bytes + (uint32_t )160);
+  uint8_t *a = apre;
+  Hacl_Impl_Load56_load_32_bytes(aq, a); /* bytes -> 5 x 56-bit limbs */
+  Hacl_Impl_BignumQ_Mul_mul_modq(ha, h, aq);
+  Hacl_Impl_BignumQ_Mul_add_modq(s, r, ha);
+  Hacl_Impl_Store56_store_56(s_, s);
+}
+
+/* Assemble the 64-byte signature: bytes 0..31 = a (R), bytes 32..63 = b (S). */
+static void Hacl_Impl_Ed25519_Sign_append_to_sig(uint8_t *signature, uint8_t *a, uint8_t *b)
+{
+  Hacl_Impl_Ed25519_Sign_Steps_copy_bytes(signature, a, (uint32_t )32);
+  Hacl_Impl_Ed25519_Sign_Steps_copy_bytes(signature + (uint32_t )32, b, (uint32_t )32);
+}
+
+/* Ed25519 signing driver.  tmp_bytes layout: +96 compressed public key A,
+ * +160 compressed nonce point R, +192 serialized S, +224 expanded secret
+ * (a || prefix).  tmp_ints: +20 nonce r, +45 a, +50 h*a, +55 S, +60 h.
+ * Flow: expand secret and compute A (step 1); r = H(prefix||msg) mod q
+ * (step 2); R = compress([r]B); h = H(R||A||msg) mod q (step 4);
+ * S = r + h*a mod q (step 5); signature = R || S. */
+static void
+Hacl_Impl_Ed25519_Sign_sign_(uint8_t *signature, uint8_t *secret, uint8_t *msg, uint32_t len1)
+{
+  uint8_t tmp_bytes[352] = { 0 };
+  uint64_t tmp_ints[65] = { 0 };
+  (void )(tmp_ints + (uint32_t )20);
+  (void )(tmp_ints + (uint32_t )60);
+  uint8_t *rs_ = tmp_bytes + (uint32_t )160;
+  uint8_t *s_ = tmp_bytes + (uint32_t )192;
+  Hacl_Impl_Ed25519_Sign_Steps_sign_step_1(secret, tmp_bytes, tmp_ints);
+  Hacl_Impl_Ed25519_Sign_Steps_sign_step_2(msg, len1, tmp_bytes, tmp_ints);
+  (void )(tmp_bytes + (uint32_t )96);
+  (void )(tmp_bytes + (uint32_t )224);
+  uint8_t rb[32] = { 0 };
+  uint64_t *r = tmp_ints + (uint32_t )20;
+  uint8_t *rs_0 = tmp_bytes + (uint32_t )160;
+  /* inlined "step 3": R = compress([r]B) */
+  Hacl_Impl_Store56_store_56(rb, r);
+  Hacl_Impl_Ed25519_Sign_Steps_point_mul_g_compress(rs_0, rb);
+  Hacl_Impl_Ed25519_Sign_Steps_sign_step_4(msg, len1, tmp_bytes, tmp_ints);
+  Hacl_Impl_Ed25519_Sign_Steps_sign_step_5(tmp_bytes, tmp_ints);
+  Hacl_Impl_Ed25519_Sign_append_to_sig(signature, rs_, s_);
+}
+
+/* Thin forwarder kept by the KreMLin extraction; see sign_. */
+static void
+Hacl_Impl_Ed25519_Sign_sign(uint8_t *signature, uint8_t *secret, uint8_t *msg, uint32_t len1)
+{
+  Hacl_Impl_Ed25519_Sign_sign_(signature, secret, msg, len1);
+}
+
+/* Erased specification artifact from extraction: always returns a dummy
+ * value; not meant to be called by application code. */
+void *Ed25519_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *b)
+{
+  return (void *)(uint8_t )0;
+}
+
+/* Public API: write the 64-byte Ed25519 signature of msg[0..len1) under
+ * the 32-byte secret key into signature. */
+void Ed25519_sign(uint8_t *signature, uint8_t *secret, uint8_t *msg, uint32_t len1)
+{
+  Hacl_Impl_Ed25519_Sign_sign(signature, secret, msg, len1);
+}
+
+/* Public API: true iff signature (64 bytes) is a valid Ed25519 signature
+ * of msg[0..len1) under the 32-byte public key. */
+bool Ed25519_verify(uint8_t *public, uint8_t *msg, uint32_t len1, uint8_t *signature)
+{
+  return Hacl_Impl_Ed25519_Verify_verify(public, msg, len1, signature);
+}
+
+/* Public API: derive the 32-byte public key from the 32-byte secret key. */
+void Ed25519_secret_to_public(uint8_t *out, uint8_t *secret)
+{
+  Hacl_Impl_Ed25519_SecretToPublic_secret_to_public(out, secret);
+}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/Ed25519.h b/sw/airborne/modules/datalink/hacl-c/Ed25519.h
new file mode 100644
index 0000000000..f00b03c60d
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Ed25519.h
@@ -0,0 +1,261 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __Ed25519_H
+#define __Ed25519_H
+
+
+
+
+
+typedef uint64_t Hacl_Lib_Create64_h64;
+
+typedef uint64_t Hacl_Bignum_Constants_limb;
+
+typedef FStar_UInt128_t Hacl_Bignum_Constants_wide;
+
+typedef uint64_t Hacl_Bignum_Parameters_limb;
+
+typedef FStar_UInt128_t Hacl_Bignum_Parameters_wide;
+
+typedef uint32_t Hacl_Bignum_Parameters_ctr;
+
+typedef uint64_t *Hacl_Bignum_Parameters_felem;
+
+typedef FStar_UInt128_t *Hacl_Bignum_Parameters_felem_wide;
+
+typedef void *Hacl_Bignum_Parameters_seqelem;
+
+typedef void *Hacl_Bignum_Parameters_seqelem_wide;
+
+typedef FStar_UInt128_t Hacl_Bignum_Wide_t;
+
+typedef uint64_t Hacl_Bignum_Limb_t;
+
+typedef Prims_int Hacl_Spec_Bignum_Field_elem;
+
+extern void
+Hacl_Spec_Bignum_Field_lemma_addition_associativity(Prims_int x0, Prims_int x1, Prims_int x2);
+
+extern void
+Hacl_Spec_Bignum_Field_lemma_multiplication_associativity(
+ Prims_int x0,
+ Prims_int x1,
+ Prims_int x2
+);
+
+extern void Hacl_Spec_Bignum_Field_lemma_addition_symmetry(Prims_int x0);
+
+extern void Hacl_Spec_Bignum_Field_lemma_multiplication_symmetry(Prims_int x0);
+
+typedef Prims_int Hacl_Spec_Bignum_elem;
+
+typedef void *Hacl_Spec_EC_AddAndDouble2_s_513;
+
+typedef void *Hacl_Spec_EC_AddAndDouble2_s_52;
+
+typedef void *Hacl_Spec_EC_AddAndDouble2_s_53;
+
+typedef void *Hacl_Spec_EC_AddAndDouble2_s_5413;
+
+typedef void *Hacl_Spec_Bignum_Crecip_s_513;
+
+extern void Hacl_Bignum_lemma_diff(Prims_int x0, Prims_int x1, Prims_pos x2);
+
+typedef Prims_nat Hacl_Spec_EC_Format_Lemmas_u51;
+
+typedef struct
+{
+ void *fst;
+ void *snd;
+}
+K___FStar_Seq_Base_seq_uint64_t_FStar_Seq_Base_seq_uint64_t;
+
+typedef K___FStar_Seq_Base_seq_uint64_t_FStar_Seq_Base_seq_uint64_t Hacl_Spec_EC_Point_spoint;
+
+typedef void *Hacl_Spec_EC_Point_s_513;
+
+typedef K___FStar_Seq_Base_seq_uint64_t_FStar_Seq_Base_seq_uint64_t
+Hacl_Spec_EC_Point_spoint_513;
+
+typedef void *Hacl_Spec_EC_Format_uint8_s;
+
+typedef uint64_t *Hacl_EC_Point_point;
+
+typedef uint8_t *Hacl_EC_Format_uint8_p;
+
+typedef uint64_t Hacl_Bignum25519_limb;
+
+typedef uint64_t *Hacl_Bignum25519_felem;
+
+typedef void *Hacl_Bignum25519_seqelem;
+
+typedef uint64_t *Hacl_Impl_Ed25519_ExtPoint_point;
+
+typedef uint8_t *Hacl_Impl_Store51_uint8_p;
+
+typedef uint64_t *Hacl_Impl_Store51_felem;
+
+typedef uint8_t *Hacl_Impl_Ed25519_PointCompress_hint8_p;
+
+typedef uint64_t *Hacl_Impl_Ed25519_PointCompress_hint64_p;
+
+typedef uint64_t *Hacl_Impl_Ed25519_SwapConditional_felem;
+
+typedef uint8_t *Hacl_Impl_Ed25519_Ladder_Step_uint8_p;
+
+typedef uint64_t *Hacl_Impl_Ed25519_Ladder_elemB;
+
+typedef uint8_t *Hacl_Hash_Lib_LoadStore_uint8_p;
+
+typedef uint8_t Hacl_Hash_Lib_Create_uint8_t;
+
+typedef uint32_t Hacl_Hash_Lib_Create_uint32_t;
+
+typedef uint64_t Hacl_Hash_Lib_Create_uint64_t;
+
+typedef uint8_t Hacl_Hash_Lib_Create_uint8_ht;
+
+typedef uint32_t Hacl_Hash_Lib_Create_uint32_ht;
+
+typedef uint64_t Hacl_Hash_Lib_Create_uint64_ht;
+
+typedef uint8_t *Hacl_Hash_Lib_Create_uint8_p;
+
+typedef uint32_t *Hacl_Hash_Lib_Create_uint32_p;
+
+typedef uint64_t *Hacl_Hash_Lib_Create_uint64_p;
+
+typedef uint8_t Hacl_Hash_SHA2_512_Lemmas_uint8_t;
+
+typedef uint32_t Hacl_Hash_SHA2_512_Lemmas_uint32_t;
+
+typedef uint64_t Hacl_Hash_SHA2_512_Lemmas_uint64_t;
+
+typedef uint8_t Hacl_Hash_SHA2_512_Lemmas_uint8_ht;
+
+typedef uint32_t Hacl_Hash_SHA2_512_Lemmas_uint32_ht;
+
+typedef uint64_t Hacl_Hash_SHA2_512_Lemmas_uint64_ht;
+
+typedef FStar_UInt128_t Hacl_Hash_SHA2_512_Lemmas_uint128_ht;
+
+typedef uint64_t *Hacl_Hash_SHA2_512_Lemmas_uint64_p;
+
+typedef uint8_t *Hacl_Hash_SHA2_512_Lemmas_uint8_p;
+
+typedef uint8_t Hacl_Hash_SHA2_512_uint8_t;
+
+typedef uint32_t Hacl_Hash_SHA2_512_uint32_t;
+
+typedef uint64_t Hacl_Hash_SHA2_512_uint64_t;
+
+typedef uint8_t Hacl_Hash_SHA2_512_uint8_ht;
+
+typedef uint32_t Hacl_Hash_SHA2_512_uint32_ht;
+
+typedef uint64_t Hacl_Hash_SHA2_512_uint64_ht;
+
+typedef FStar_UInt128_t Hacl_Hash_SHA2_512_uint128_ht;
+
+typedef uint64_t *Hacl_Hash_SHA2_512_uint64_p;
+
+typedef uint8_t *Hacl_Hash_SHA2_512_uint8_p;
+
+typedef uint8_t SHA2_512_uint8_t;
+
+typedef uint32_t SHA2_512_uint32_t;
+
+typedef uint64_t SHA2_512_uint64_t;
+
+typedef uint8_t SHA2_512_uint8_ht;
+
+typedef uint32_t SHA2_512_uint32_ht;
+
+typedef uint64_t SHA2_512_uint64_ht;
+
+typedef FStar_UInt128_t SHA2_512_uint128_ht;
+
+typedef uint64_t *SHA2_512_uint64_p;
+
+typedef uint8_t *SHA2_512_uint8_p;
+
+typedef uint8_t *Hacl_Impl_Ed25519_SecretExpand_hint8_p;
+
+typedef uint8_t *Hacl_Impl_Ed25519_SecretToPublic_hint8_p;
+
+typedef Prims_nat Hacl_Impl_Ed25519_Verify_Lemmas_u51;
+
+typedef void *Hacl_Spec_BignumQ_Eval_qelem;
+
+typedef uint32_t Hacl_Spec_BignumQ_Eval_u32;
+
+typedef uint64_t Hacl_Spec_BignumQ_Eval_u64;
+
+typedef uint8_t *Hacl_Impl_Ed25519_PointEqual_uint8_p;
+
+typedef uint64_t *Hacl_Impl_Ed25519_PointEqual_felem;
+
+typedef uint32_t Hacl_Impl_Load56_u32;
+
+typedef uint8_t Hacl_Impl_Load56_h8;
+
+typedef uint64_t Hacl_Impl_Load56_h64;
+
+typedef uint8_t *Hacl_Impl_Load56_hint8_p;
+
+typedef uint64_t *Hacl_Impl_Ed25519_RecoverX_elemB;
+
+typedef uint32_t Hacl_Impl_Load51_u32;
+
+typedef uint8_t Hacl_Impl_Load51_h8;
+
+typedef uint64_t Hacl_Impl_Load51_h64;
+
+typedef uint8_t *Hacl_Impl_Load51_hint8_p;
+
+typedef uint8_t *Hacl_Impl_Store56_hint8_p;
+
+typedef uint64_t *Hacl_Impl_Store56_qelem;
+
+typedef uint8_t *Hacl_Impl_SHA512_Ed25519_1_hint8_p;
+
+typedef uint8_t *Hacl_Impl_SHA512_Ed25519_hint8_p;
+
+typedef uint8_t *Hacl_Impl_Sha512_hint8_p;
+
+typedef FStar_UInt128_t Hacl_Lib_Create128_h128;
+
+typedef uint64_t Hacl_Spec_BignumQ_Mul_Lemmas_3_u56;
+
+typedef Prims_nat Hacl_Spec_BignumQ_Mul_Lemmas_1_u56;
+
+typedef void *Hacl_Spec_BignumQ_Mul_qelem_56;
+
+typedef uint64_t *Hacl_Impl_BignumQ_Mul_qelemB;
+
+typedef uint64_t Hacl_Impl_BignumQ_Mul_h64;
+
+typedef uint8_t *Hacl_Impl_Ed25519_Verify_Steps_uint8_p;
+
+typedef uint64_t *Hacl_Impl_Ed25519_Verify_Steps_felem;
+
+typedef uint8_t *Hacl_Impl_Ed25519_Verify_uint8_p;
+
+typedef uint64_t *Hacl_Impl_Ed25519_Verify_felem;
+
+typedef uint8_t *Hacl_Impl_Ed25519_Sign_Steps_hint8_p;
+
+typedef uint8_t *Hacl_Impl_Ed25519_Sign_hint8_p;
+
+typedef uint8_t *Ed25519_uint8_p;
+
+typedef uint8_t *Ed25519_hint8_p;
+
+void *Ed25519_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *b);
+
+void Ed25519_sign(uint8_t *signature, uint8_t *secret, uint8_t *msg, uint32_t len1);
+
+bool Ed25519_verify(uint8_t *public, uint8_t *msg, uint32_t len1, uint8_t *signature);
+
+void Ed25519_secret_to_public(uint8_t *out, uint8_t *secret);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/FStar.h b/sw/airborne/modules/datalink/hacl-c/FStar.h
new file mode 100644
index 0000000000..c72834e2a0
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/FStar.h
@@ -0,0 +1,294 @@
+/* This file was auto-generated by KreMLin! */
+
+#ifndef __FStar_H
+#define __FStar_H
+
+typedef struct
+{
+ uint64_t low;
+ uint64_t high;
+}
+FStar_UInt128_uint128;
+
+typedef FStar_UInt128_uint128 FStar_UInt128_t;
+
+typedef struct
+{
+ uint64_t fst;
+ uint64_t snd;
+ uint64_t thd;
+ uint64_t f3;
+}
+K___uint64_t_uint64_t_uint64_t_uint64_t;
+
+/* Branch-free borrow: returns 1 when a < b, else 0, computed without any
+ * data-dependent branch (the comparison result is extracted from the top
+ * bit of the bit-twiddled expression) so timing does not leak operands. */
+static uint64_t FStar_UInt128_constant_time_carry(uint64_t a, uint64_t b)
+{
+  return (a ^ ((a ^ b) | ((a - b) ^ b))) >> (uint32_t )63;
+}
+
+/* Alias for constant_time_carry kept by the extraction. */
+static uint64_t FStar_UInt128_carry(uint64_t a, uint64_t b)
+{
+  return FStar_UInt128_constant_time_carry(a, b);
+}
+
+
+/* 128-bit addition on the two-uint64 emulation struct.  The carry out of
+ * the low halves is detected branch-freely: carry(a.low+b.low, b.low) is 1
+ * exactly when the low sum wrapped below b.low. */
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  return
+    (
+      (FStar_UInt128_uint128 ){
+        .low = a.low + b.low,
+        .high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low)
+      }
+    );
+}
+
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return
+ (
+ (FStar_UInt128_uint128 ){
+ .low = a.low + b.low,
+ .high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low)
+ }
+ );
+}
+
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return
+ (
+ (FStar_UInt128_uint128 ){
+ .low = a.low - b.low,
+ .high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low)
+ }
+ );
+}
+
+static FStar_UInt128_uint128 __attribute__((unused))
+FStar_UInt128_sub_mod_impl(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return
+ (
+ (FStar_UInt128_uint128 ){
+ .low = a.low - b.low,
+ .high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low)
+ }
+ );
+}
+
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return FStar_UInt128_sub_mod_impl(a, b);
+}
+
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return ((FStar_UInt128_uint128 ){ .low = a.low & b.low, .high = a.high & b.high });
+}
+
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return ((FStar_UInt128_uint128 ){ .low = a.low ^ b.low, .high = a.high ^ b.high });
+}
+
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return ((FStar_UInt128_uint128 ){ .low = a.low | b.low, .high = a.high | b.high });
+}
+
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_lognot(FStar_UInt128_uint128 a)
+{
+ return ((FStar_UInt128_uint128 ){ .low = ~a.low, .high = ~a.high });
+}
+
+static uint32_t FStar_UInt128_u32_64 = (uint32_t )64;
+
+static uint64_t FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s)
+{
+ return (hi << s) + (lo >> (FStar_UInt128_u32_64 - s));
+}
+
+static uint64_t FStar_UInt128_add_u64_shift_left_respec(uint64_t hi, uint64_t lo, uint32_t s)
+{
+ return FStar_UInt128_add_u64_shift_left(hi, lo, s);
+}
+
+static FStar_UInt128_uint128 __attribute__((unused))
+FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
+{
+ if (s == (uint32_t )0)
+ return a;
+ else
+ return
+ (
+ (FStar_UInt128_uint128 ){
+ .low = a.low << s,
+ .high = FStar_UInt128_add_u64_shift_left_respec(a.high, a.low, s)
+ }
+ );
+}
+
+static FStar_UInt128_uint128 __attribute__((unused))
+FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s)
+{
+ return
+ ((FStar_UInt128_uint128 ){ .low = (uint64_t )0, .high = a.low << (s - FStar_UInt128_u32_64) });
+}
+
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s)
+{
+ if (s < FStar_UInt128_u32_64)
+ return FStar_UInt128_shift_left_small(a, s);
+ else
+ return FStar_UInt128_shift_left_large(a, s);
+}
+
+static uint64_t FStar_UInt128_add_u64_shift_right(uint64_t hi, uint64_t lo, uint32_t s)
+{
+ return (lo >> s) + (hi << (FStar_UInt128_u32_64 - s));
+}
+
+static uint64_t FStar_UInt128_add_u64_shift_right_respec(uint64_t hi, uint64_t lo, uint32_t s)
+{
+ return FStar_UInt128_add_u64_shift_right(hi, lo, s);
+}
+
+static FStar_UInt128_uint128 __attribute__((unused))
+FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
+{
+ if (s == (uint32_t )0)
+ return a;
+ else
+ return
+ (
+ (FStar_UInt128_uint128 ){
+ .low = FStar_UInt128_add_u64_shift_right_respec(a.high, a.low, s),
+ .high = a.high >> s
+ }
+ );
+}
+
+static FStar_UInt128_uint128 __attribute__((unused))
+FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s)
+{
+ return
+ ((FStar_UInt128_uint128 ){ .low = a.high >> (s - FStar_UInt128_u32_64), .high = (uint64_t )0 });
+}
+
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s)
+{
+ if (s < FStar_UInt128_u32_64)
+ return FStar_UInt128_shift_right_small(a, s);
+ else
+ return FStar_UInt128_shift_right_large(a, s);
+}
+
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return
+ (
+ (FStar_UInt128_uint128 ){
+ .low = FStar_UInt64_eq_mask(a.low, b.low) & FStar_UInt64_eq_mask(a.high, b.high),
+ .high = FStar_UInt64_eq_mask(a.low, b.low) & FStar_UInt64_eq_mask(a.high, b.high)
+ }
+ );
+}
+
+/* Constant-time "a >= b" as a mask: both halves are all-ones when
+ * a >= b and zero otherwise.  a >= b iff high(a) > high(b), or the high
+ * halves are equal and low(a) >= low(b). */
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  return
+    (
+      (FStar_UInt128_uint128 ){
+        .low = (FStar_UInt64_gte_mask(a.high,
+        b.high)
+        & ~FStar_UInt64_eq_mask(a.high, b.high))
+        | (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low)),
+        .high = (FStar_UInt64_gte_mask(a.high,
+        b.high)
+        & ~FStar_UInt64_eq_mask(a.high, b.high))
+        | (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low))
+      }
+    );
+}
+
+static FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a)
+{
+ return ((FStar_UInt128_uint128 ){ .low = a, .high = (uint64_t )0 });
+}
+
+static uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a)
+{
+ return a.low;
+}
+
+static uint64_t FStar_UInt128_u64_l32_mask = (uint64_t )0xffffffff;
+
+static uint64_t FStar_UInt128_u64_mod_32(uint64_t a)
+{
+ return a & FStar_UInt128_u64_l32_mask;
+}
+
+static uint32_t FStar_UInt128_u32_32 = (uint32_t )32;
+
+static K___uint64_t_uint64_t_uint64_t_uint64_t
+FStar_UInt128_mul_wide_impl_t_(uint64_t x, uint64_t y)
+{
+ return
+ (
+ (K___uint64_t_uint64_t_uint64_t_uint64_t ){
+ .fst = FStar_UInt128_u64_mod_32(x),
+ .snd = FStar_UInt128_u64_mod_32(FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y)),
+ .thd = x >> FStar_UInt128_u32_32,
+ .f3 = (x >> FStar_UInt128_u32_32)
+ * FStar_UInt128_u64_mod_32(y)
+ + (FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y) >> FStar_UInt128_u32_32)
+ }
+ );
+}
+
+static uint64_t FStar_UInt128_u32_combine_(uint64_t hi, uint64_t lo)
+{
+ return lo + (hi << FStar_UInt128_u32_32);
+}
+
+static FStar_UInt128_uint128 FStar_UInt128_mul_wide_impl(uint64_t x, uint64_t y)
+{
+ K___uint64_t_uint64_t_uint64_t_uint64_t scrut = FStar_UInt128_mul_wide_impl_t_(x, y);
+ uint64_t u1 = scrut.fst;
+ uint64_t w3 = scrut.snd;
+ uint64_t x_ = scrut.thd;
+ uint64_t t_ = scrut.f3;
+ return
+ (
+ (FStar_UInt128_uint128 ){
+ .low = FStar_UInt128_u32_combine_(u1
+ * (y >> FStar_UInt128_u32_32)
+ + FStar_UInt128_u64_mod_32(t_),
+ w3),
+ .high = x_
+ * (y >> FStar_UInt128_u32_32)
+ + (t_ >> FStar_UInt128_u32_32)
+ +
+ ((u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_))
+ >> FStar_UInt128_u32_32)
+ }
+ );
+}
+
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_UInt128_mul_wide(uint64_t x, uint64_t y)
+{
+ return FStar_UInt128_mul_wide_impl(x, y);
+}
+
+static FStar_UInt128_uint128 __attribute__((unused)) FStar_Int_Cast_Full_uint64_to_uint128(uint64_t a)
+{
+ return FStar_UInt128_uint64_to_uint128(a);
+}
+
+static uint64_t __attribute__((unused)) FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_uint128 a)
+{
+ return FStar_UInt128_uint128_to_uint64(a);
+}
+
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/HMAC_SHA2_256.c b/sw/airborne/modules/datalink/hacl-c/HMAC_SHA2_256.c
new file mode 100644
index 0000000000..7b89075471
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/HMAC_SHA2_256.c
@@ -0,0 +1,348 @@
+#include "HMAC_SHA2_256.h"
+
+/* Load len big-endian 32-bit words from input into output. */
+static void
+Hacl_Hash_Lib_LoadStore_uint32s_from_be_bytes(uint32_t *output, uint8_t *input, uint32_t len)
+{
+  for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+  {
+    uint8_t *x0 = input + (uint32_t )4 * i;
+    uint32_t inputi = load32_be(x0);
+    output[i] = inputi;
+  }
+}
+
+/* Store len 32-bit words to output in big-endian byte order. */
+static void
+Hacl_Hash_Lib_LoadStore_uint32s_to_be_bytes(uint8_t *output, uint32_t *input, uint32_t len)
+{
+  for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+  {
+    uint32_t hd1 = input[i];
+    uint8_t *x0 = output + (uint32_t )4 * i;
+    store32_be(x0, hd1);
+  }
+}
+
+/* Initialize the 137-word SHA-256 state.  Layout (as used by update()):
+ * state[0..63]    = round constants K,
+ * state[64..127]  = message-schedule scratch,
+ * state[128..135] = chaining hash H (initialized to H0 here),
+ * state[136]      = processed-block counter.
+ * The cascade of pNN sub-pointers filling K in groups of 4 is the
+ * extraction's unrolled form of the constant table. */
+static void Hacl_Hash_SHA2_256_init(uint32_t *state)
+{
+  (void )(state + (uint32_t )136);
+  uint32_t *k1 = state;
+  uint32_t *h_01 = state + (uint32_t )128;
+  uint32_t *p10 = k1;
+  uint32_t *p20 = k1 + (uint32_t )16;
+  uint32_t *p3 = k1 + (uint32_t )32;
+  uint32_t *p4 = k1 + (uint32_t )48;
+  uint32_t *p11 = p10;
+  uint32_t *p21 = p10 + (uint32_t )8;
+  uint32_t *p12 = p11;
+  uint32_t *p22 = p11 + (uint32_t )4;
+  p12[0] = (uint32_t )0x428a2f98;
+  p12[1] = (uint32_t )0x71374491;
+  p12[2] = (uint32_t )0xb5c0fbcf;
+  p12[3] = (uint32_t )0xe9b5dba5;
+  p22[0] = (uint32_t )0x3956c25b;
+  p22[1] = (uint32_t )0x59f111f1;
+  p22[2] = (uint32_t )0x923f82a4;
+  p22[3] = (uint32_t )0xab1c5ed5;
+  uint32_t *p13 = p21;
+  uint32_t *p23 = p21 + (uint32_t )4;
+  p13[0] = (uint32_t )0xd807aa98;
+  p13[1] = (uint32_t )0x12835b01;
+  p13[2] = (uint32_t )0x243185be;
+  p13[3] = (uint32_t )0x550c7dc3;
+  p23[0] = (uint32_t )0x72be5d74;
+  p23[1] = (uint32_t )0x80deb1fe;
+  p23[2] = (uint32_t )0x9bdc06a7;
+  p23[3] = (uint32_t )0xc19bf174;
+  uint32_t *p14 = p20;
+  uint32_t *p24 = p20 + (uint32_t )8;
+  uint32_t *p15 = p14;
+  uint32_t *p25 = p14 + (uint32_t )4;
+  p15[0] = (uint32_t )0xe49b69c1;
+  p15[1] = (uint32_t )0xefbe4786;
+  p15[2] = (uint32_t )0x0fc19dc6;
+  p15[3] = (uint32_t )0x240ca1cc;
+  p25[0] = (uint32_t )0x2de92c6f;
+  p25[1] = (uint32_t )0x4a7484aa;
+  p25[2] = (uint32_t )0x5cb0a9dc;
+  p25[3] = (uint32_t )0x76f988da;
+  uint32_t *p16 = p24;
+  uint32_t *p26 = p24 + (uint32_t )4;
+  p16[0] = (uint32_t )0x983e5152;
+  p16[1] = (uint32_t )0xa831c66d;
+  p16[2] = (uint32_t )0xb00327c8;
+  p16[3] = (uint32_t )0xbf597fc7;
+  p26[0] = (uint32_t )0xc6e00bf3;
+  p26[1] = (uint32_t )0xd5a79147;
+  p26[2] = (uint32_t )0x06ca6351;
+  p26[3] = (uint32_t )0x14292967;
+  uint32_t *p17 = p3;
+  uint32_t *p27 = p3 + (uint32_t )8;
+  uint32_t *p18 = p17;
+  uint32_t *p28 = p17 + (uint32_t )4;
+  p18[0] = (uint32_t )0x27b70a85;
+  p18[1] = (uint32_t )0x2e1b2138;
+  p18[2] = (uint32_t )0x4d2c6dfc;
+  p18[3] = (uint32_t )0x53380d13;
+  p28[0] = (uint32_t )0x650a7354;
+  p28[1] = (uint32_t )0x766a0abb;
+  p28[2] = (uint32_t )0x81c2c92e;
+  p28[3] = (uint32_t )0x92722c85;
+  uint32_t *p19 = p27;
+  uint32_t *p29 = p27 + (uint32_t )4;
+  p19[0] = (uint32_t )0xa2bfe8a1;
+  p19[1] = (uint32_t )0xa81a664b;
+  p19[2] = (uint32_t )0xc24b8b70;
+  p19[3] = (uint32_t )0xc76c51a3;
+  p29[0] = (uint32_t )0xd192e819;
+  p29[1] = (uint32_t )0xd6990624;
+  p29[2] = (uint32_t )0xf40e3585;
+  p29[3] = (uint32_t )0x106aa070;
+  uint32_t *p110 = p4;
+  uint32_t *p210 = p4 + (uint32_t )8;
+  uint32_t *p1 = p110;
+  uint32_t *p211 = p110 + (uint32_t )4;
+  p1[0] = (uint32_t )0x19a4c116;
+  p1[1] = (uint32_t )0x1e376c08;
+  p1[2] = (uint32_t )0x2748774c;
+  p1[3] = (uint32_t )0x34b0bcb5;
+  p211[0] = (uint32_t )0x391c0cb3;
+  p211[1] = (uint32_t )0x4ed8aa4a;
+  p211[2] = (uint32_t )0x5b9cca4f;
+  p211[3] = (uint32_t )0x682e6ff3;
+  uint32_t *p111 = p210;
+  uint32_t *p212 = p210 + (uint32_t )4;
+  p111[0] = (uint32_t )0x748f82ee;
+  p111[1] = (uint32_t )0x78a5636f;
+  p111[2] = (uint32_t )0x84c87814;
+  p111[3] = (uint32_t )0x8cc70208;
+  p212[0] = (uint32_t )0x90befffa;
+  p212[1] = (uint32_t )0xa4506ceb;
+  p212[2] = (uint32_t )0xbef9a3f7;
+  p212[3] = (uint32_t )0xc67178f2;
+  /* initial hash value H0 */
+  uint32_t *p112 = h_01;
+  uint32_t *p2 = h_01 + (uint32_t )4;
+  p112[0] = (uint32_t )0x6a09e667;
+  p112[1] = (uint32_t )0xbb67ae85;
+  p112[2] = (uint32_t )0x3c6ef372;
+  p112[3] = (uint32_t )0xa54ff53a;
+  p2[0] = (uint32_t )0x510e527f;
+  p2[1] = (uint32_t )0x9b05688c;
+  p2[2] = (uint32_t )0x1f83d9ab;
+  p2[3] = (uint32_t )0x5be0cd19;
+}
+
+/* SHA-256 compression: absorb one 64-byte block into the state and bump
+ * the block counter (state[136]).  Follows the standard structure:
+ * build the 64-word message schedule, run 64 rounds over a working copy
+ * of the hash, then add the working copy back into the chaining hash. */
+static void Hacl_Hash_SHA2_256_update(uint32_t *state, uint8_t *data)
+{
+  uint32_t data_w[16] = { 0 };
+  Hacl_Hash_Lib_LoadStore_uint32s_from_be_bytes(data_w, data, (uint32_t )16);
+  uint32_t *hash_w = state + (uint32_t )128;
+  uint32_t *ws_w = state + (uint32_t )64;
+  uint32_t *k_w = state;
+  uint32_t *counter_w = state + (uint32_t )136;
+  for (uint32_t i = (uint32_t )0; i < (uint32_t )16; i = i + (uint32_t )1)
+  {
+    uint32_t uu____206 = data_w[i];
+    ws_w[i] = uu____206;
+  }
+  /* message-schedule expansion: W[i] = s1(W[i-2]) + W[i-7] + s0(W[i-15]) + W[i-16] */
+  for (uint32_t i = (uint32_t )16; i < (uint32_t )64; i = i + (uint32_t )1)
+  {
+    uint32_t t16 = ws_w[i - (uint32_t )16];
+    uint32_t t15 = ws_w[i - (uint32_t )15];
+    uint32_t t7 = ws_w[i - (uint32_t )7];
+    uint32_t t2 = ws_w[i - (uint32_t )2];
+    ws_w[i] =
+      ((t2 >> (uint32_t )17 | t2 << (uint32_t )32 - (uint32_t )17)
+      ^ (t2 >> (uint32_t )19 | t2 << (uint32_t )32 - (uint32_t )19) ^ t2 >> (uint32_t )10)
+      +
+        t7
+        +
+          ((t15 >> (uint32_t )7 | t15 << (uint32_t )32 - (uint32_t )7)
+          ^ (t15 >> (uint32_t )18 | t15 << (uint32_t )32 - (uint32_t )18) ^ t15 >> (uint32_t )3)
+          + t16;
+  }
+  uint32_t hash_0[8] = { 0 };
+  memcpy(hash_0, hash_w, (uint32_t )8 * sizeof hash_w[0]);
+  /* 64 rounds; t1 uses Sigma1/Ch, t2 uses Sigma0/Maj */
+  for (uint32_t i = (uint32_t )0; i < (uint32_t )64; i = i + (uint32_t )1)
+  {
+    uint32_t a = hash_0[0];
+    uint32_t b = hash_0[1];
+    uint32_t c = hash_0[2];
+    uint32_t d = hash_0[3];
+    uint32_t e = hash_0[4];
+    uint32_t f1 = hash_0[5];
+    uint32_t g = hash_0[6];
+    uint32_t h = hash_0[7];
+    uint32_t kt = k_w[i];
+    uint32_t wst = ws_w[i];
+    uint32_t
+    t1 =
+      h
+      +
+        ((e >> (uint32_t )6 | e << (uint32_t )32 - (uint32_t )6)
+        ^
+          (e >> (uint32_t )11 | e << (uint32_t )32 - (uint32_t )11)
+          ^ (e >> (uint32_t )25 | e << (uint32_t )32 - (uint32_t )25))
+      + (e & f1 ^ ~e & g)
+      + kt
+      + wst;
+    uint32_t
+    t2 =
+      ((a >> (uint32_t )2 | a << (uint32_t )32 - (uint32_t )2)
+      ^
+        (a >> (uint32_t )13 | a << (uint32_t )32 - (uint32_t )13)
+        ^ (a >> (uint32_t )22 | a << (uint32_t )32 - (uint32_t )22))
+      + (a & b ^ a & c ^ b & c);
+    uint32_t x1 = t1 + t2;
+    uint32_t x5 = d + t1;
+    uint32_t *p1 = hash_0;
+    uint32_t *p2 = hash_0 + (uint32_t )4;
+    p1[0] = x1;
+    p1[1] = a;
+    p1[2] = b;
+    p1[3] = c;
+    p2[0] = x5;
+    p2[1] = e;
+    p2[2] = f1;
+    p2[3] = g;
+  }
+  /* feed-forward: H += working variables */
+  for (uint32_t i = (uint32_t )0; i < (uint32_t )8; i = i + (uint32_t )1)
+  {
+    uint32_t uu____871 = hash_w[i];
+    uint32_t uu____874 = hash_0[i];
+    uint32_t uu____870 = uu____871 + uu____874;
+    hash_w[i] = uu____870;
+  }
+  uint32_t c0 = counter_w[0];
+  uint32_t one1 = (uint32_t )1;
+  counter_w[0] = c0 + one1;
+}
+
+/* Absorb n1 consecutive 64-byte blocks from data. */
+static void Hacl_Hash_SHA2_256_update_multi(uint32_t *state, uint8_t *data, uint32_t n1)
+{
+  for (uint32_t i = (uint32_t )0; i < n1; i = i + (uint32_t )1)
+  {
+    uint8_t *b = data + i * (uint32_t )64;
+    Hacl_Hash_SHA2_256_update(state, b);
+  }
+}
+
+/* Absorb the final partial block (len < 64 bytes) with SHA-256 padding:
+ * 0x80, zero fill, then the 64-bit big-endian message length in bits.
+ * If len < 56 the padding fits in one block (use the second half of the
+ * 128-byte buffer), otherwise two blocks are needed.  The total length is
+ * reconstructed from the block counter state[136]. */
+static void Hacl_Hash_SHA2_256_update_last(uint32_t *state, uint8_t *data, uint32_t len)
+{
+  uint8_t blocks[128] = { 0 };
+  K___uint32_t_uint8_t_ uu____1925;
+  if (len < (uint32_t )56)
+    uu____1925 = ((K___uint32_t_uint8_t_ ){ .fst = (uint32_t )1, .snd = blocks + (uint32_t )64 });
+  else
+    uu____1925 = ((K___uint32_t_uint8_t_ ){ .fst = (uint32_t )2, .snd = blocks });
+  K___uint32_t_uint8_t_ scrut = uu____1925;
+  uint32_t nb = scrut.fst;
+  uint8_t *final_blocks = scrut.snd;
+  memcpy(final_blocks, data, len * sizeof data[0]);
+  uint32_t n1 = state[136]; /* number of full blocks already absorbed */
+  uint8_t *padding = final_blocks + len;
+  uint32_t
+  pad0len = ((uint32_t )64 - (len + (uint32_t )8 + (uint32_t )1) % (uint32_t )64) % (uint32_t )64;
+  uint8_t *buf1 = padding;
+  (void )(padding + (uint32_t )1);
+  uint8_t *buf2 = padding + (uint32_t )1 + pad0len;
+  uint64_t
+  encodedlen =
+    ((uint64_t )n1 * (uint64_t )(uint32_t )64 + (uint64_t )len)
+    * (uint64_t )(uint32_t )8;
+  buf1[0] = (uint8_t )0x80;
+  store64_be(buf2, encodedlen);
+  Hacl_Hash_SHA2_256_update_multi(state, final_blocks, nb);
+}
+
+/* Serialize the 8-word chaining hash (state+128) as the 32-byte digest. */
+static void Hacl_Hash_SHA2_256_finish(uint32_t *state, uint8_t *hash1)
+{
+  uint32_t *hash_w = state + (uint32_t )128;
+  Hacl_Hash_Lib_LoadStore_uint32s_to_be_bytes(hash1, hash_w, (uint32_t )8);
+}
+
+/* One-shot SHA-256: hash1 (32 bytes) <- SHA-256(input[0..len)). */
+static void Hacl_Hash_SHA2_256_hash(uint8_t *hash1, uint8_t *input, uint32_t len)
+{
+  uint32_t state[137] = { 0 };
+  uint32_t n1 = len / (uint32_t )64;
+  uint32_t r = len % (uint32_t )64;
+  uint8_t *input_blocks = input;
+  uint8_t *input_last = input + n1 * (uint32_t )64;
+  Hacl_Hash_SHA2_256_init(state);
+  Hacl_Hash_SHA2_256_update_multi(state, input_blocks, n1);
+  Hacl_Hash_SHA2_256_update_last(state, input_last, r);
+  Hacl_Hash_SHA2_256_finish(state, hash1);
+}
+
+/* a[i] ^= b[i] for i in [0, len). */
+static void Hacl_HMAC_SHA2_256_xor_bytes_inplace(uint8_t *a, uint8_t *b, uint32_t len)
+{
+  for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+  {
+    uint8_t uu____871 = a[i];
+    uint8_t uu____874 = b[i];
+    uint8_t uu____870 = uu____871 ^ uu____874;
+    a[i] = uu____870;
+  }
+}
+
+/* HMAC-SHA-256 with an already block-sized (64-byte) key:
+ * mac <- H((key ^ opad) || H((key ^ ipad) || data)).
+ * The 32-byte inner digest is written back into the ipad buffer (s4) and
+ * absorbed into the outer hash via update_last with length 32. */
+static void
+Hacl_HMAC_SHA2_256_hmac_core(uint8_t *mac, uint8_t *key, uint8_t *data, uint32_t len)
+{
+  uint8_t ipad[64];
+  for (uintmax_t _i = 0; _i < (uint32_t )64; ++_i)
+    ipad[_i] = (uint8_t )0x36;
+  uint8_t opad[64];
+  for (uintmax_t _i = 0; _i < (uint32_t )64; ++_i)
+    opad[_i] = (uint8_t )0x5c;
+  /* inner hash: H((key ^ ipad) || data) */
+  Hacl_HMAC_SHA2_256_xor_bytes_inplace(ipad, key, (uint32_t )64);
+  uint32_t state0[137] = { 0 };
+  uint32_t n0 = len / (uint32_t )64;
+  uint32_t r0 = len % (uint32_t )64;
+  uint8_t *blocks0 = data;
+  uint8_t *last0 = data + n0 * (uint32_t )64;
+  Hacl_Hash_SHA2_256_init(state0);
+  Hacl_Hash_SHA2_256_update(state0, ipad);
+  Hacl_Hash_SHA2_256_update_multi(state0, blocks0, n0);
+  Hacl_Hash_SHA2_256_update_last(state0, last0, r0);
+  uint8_t *hash0 = ipad;
+  Hacl_Hash_SHA2_256_finish(state0, hash0);
+  uint8_t *s4 = ipad;
+  /* outer hash: H((key ^ opad) || inner digest) */
+  Hacl_HMAC_SHA2_256_xor_bytes_inplace(opad, key, (uint32_t )64);
+  uint32_t state1[137] = { 0 };
+  Hacl_Hash_SHA2_256_init(state1);
+  Hacl_Hash_SHA2_256_update(state1, opad);
+  Hacl_Hash_SHA2_256_update_last(state1, s4, (uint32_t )32);
+  Hacl_Hash_SHA2_256_finish(state1, mac);
+}
+
+/* HMAC-SHA-256 with arbitrary key length: keys longer than the 64-byte
+ * block are first hashed, shorter keys are right-padded with zeros, then
+ * hmac_core runs with the normalized key. */
+static void
+Hacl_HMAC_SHA2_256_hmac(
+  uint8_t *mac,
+  uint8_t *key,
+  uint32_t keylen,
+  uint8_t *data,
+  uint32_t datalen
+)
+{
+  uint8_t nkey[64];
+  for (uintmax_t _i = 0; _i < (uint32_t )64; ++_i)
+    nkey[_i] = (uint8_t )0x00;
+  if (keylen <= (uint32_t )64)
+    memcpy(nkey, key, keylen * sizeof key[0]);
+  else
+  {
+    uint8_t *nkey0 = nkey;
+    Hacl_Hash_SHA2_256_hash(nkey0, key, keylen);
+  }
+  Hacl_HMAC_SHA2_256_hmac_core(mac, nkey, data, datalen);
+}
+
+/* Public API: HMAC-SHA-256 with a key that must already be exactly
+ * 64 bytes (one hash block); see Hacl_HMAC_SHA2_256_hmac_core. */
+void hmac_core(uint8_t *mac, uint8_t *key, uint8_t *data, uint32_t len)
+{
+  Hacl_HMAC_SHA2_256_hmac_core(mac, key, data, len);
+}
+
+/* Public API: HMAC-SHA-256 with arbitrary key length; mac is 32 bytes. */
+void hmac(uint8_t *mac, uint8_t *key, uint32_t keylen, uint8_t *data, uint32_t datalen)
+{
+  Hacl_HMAC_SHA2_256_hmac(mac, key, keylen, data, datalen);
+}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/HMAC_SHA2_256.h b/sw/airborne/modules/datalink/hacl-c/HMAC_SHA2_256.h
new file mode 100644
index 0000000000..6ec0f93c27
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/HMAC_SHA2_256.h
@@ -0,0 +1,78 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __HMAC_SHA2_256_H
+#define __HMAC_SHA2_256_H
+
+
+
+#include "testlib.h"
+
+typedef uint8_t Hacl_Hash_Lib_Create_uint8_t;
+
+typedef uint32_t Hacl_Hash_Lib_Create_uint32_t;
+
+typedef uint64_t Hacl_Hash_Lib_Create_uint64_t;
+
+typedef uint8_t Hacl_Hash_Lib_Create_uint8_ht;
+
+typedef uint32_t Hacl_Hash_Lib_Create_uint32_ht;
+
+typedef uint64_t Hacl_Hash_Lib_Create_uint64_ht;
+
+typedef uint8_t *Hacl_Hash_Lib_Create_uint8_p;
+
+typedef uint32_t *Hacl_Hash_Lib_Create_uint32_p;
+
+typedef uint64_t *Hacl_Hash_Lib_Create_uint64_p;
+
+typedef uint8_t *Hacl_Hash_Lib_LoadStore_uint8_p;
+
+typedef uint8_t Hacl_Hash_SHA2_256_uint8_t;
+
+typedef uint32_t Hacl_Hash_SHA2_256_uint32_t;
+
+typedef uint64_t Hacl_Hash_SHA2_256_uint64_t;
+
+typedef uint8_t Hacl_Hash_SHA2_256_uint8_ht;
+
+typedef uint32_t Hacl_Hash_SHA2_256_uint32_ht;
+
+typedef uint64_t Hacl_Hash_SHA2_256_uint64_ht;
+
+typedef uint32_t *Hacl_Hash_SHA2_256_uint32_p;
+
+typedef uint8_t *Hacl_Hash_SHA2_256_uint8_p;
+
+typedef struct
+{
+ uint32_t fst;
+ uint8_t *snd;
+}
+K___uint32_t_uint8_t_;
+
+typedef uint8_t Hacl_HMAC_SHA2_256_uint8_t;
+
+typedef uint32_t Hacl_HMAC_SHA2_256_uint32_t;
+
+typedef uint64_t Hacl_HMAC_SHA2_256_uint64_t;
+
+typedef uint8_t Hacl_HMAC_SHA2_256_uint8_ht;
+
+typedef uint32_t Hacl_HMAC_SHA2_256_uint32_ht;
+
+typedef uint64_t Hacl_HMAC_SHA2_256_uint64_ht;
+
+typedef uint32_t *Hacl_HMAC_SHA2_256_uint32_p;
+
+typedef uint8_t *Hacl_HMAC_SHA2_256_uint8_p;
+
typedef uint8_t uint8_ht;

/* NOTE(review): this redeclares the <stdint.h> name `uint32_t` as itself.
 * A self-typedef is only legal since C11 (typedef redefinition to the
 * identical type); the sibling aliases here use the `_ht` suffix, so this
 * was presumably meant to be `uint32_ht` — confirm against the KreMLin
 * generator output before changing. */
typedef uint32_t uint32_t;

typedef uint8_t *uint8_p;
+
+void hmac_core(uint8_t *mac, uint8_t *key, uint8_t *data, uint32_t len);
+
+void hmac(uint8_t *mac, uint8_t *key, uint32_t keylen, uint8_t *data, uint32_t datalen);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/Hacl_Policies.c b/sw/airborne/modules/datalink/hacl-c/Hacl_Policies.c
new file mode 100644
index 0000000000..0aca4e71f2
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Hacl_Policies.c
@@ -0,0 +1,42 @@
+#include "Hacl_Policies.h"
+
/* Declassification ghosts: in the F* source these mark a secret value as
 * public for the information-flow checker; the extracted C is simply the
 * identity function at each width. */
uint8_t Hacl_Policies_declassify_u8(uint8_t x)
{
  return x;
}

uint32_t Hacl_Policies_declassify_u32(uint32_t x)
{
  return x;
}

uint64_t Hacl_Policies_declassify_u64(uint64_t x)
{
  return x;
}

FStar_UInt128_t Hacl_Policies_declassify_u128(FStar_UInt128_t x)
{
  return x;
}
+
/*
 * Constant-time byte-comparison core.  Folds the equality mask of every byte
 * pair into the caller-seeded accumulator tmp[0]: it keeps its initial value
 * only if all `len` pairs are equal and becomes 0 once any pair differs.
 * The loop always visits all `len` bytes (no early exit), so timing does not
 * reveal the position of the first mismatch.  Returns the final accumulator.
 */
uint8_t Hacl_Policies_cmp_bytes_(uint8_t *b1, uint8_t *b2, uint32_t len, uint8_t *tmp)
{
  for (uint32_t idx = (uint32_t )0; idx < len; ++idx)
  {
    uint8_t eq = FStar_UInt8_eq_mask(b1[idx], b2[idx]);
    tmp[0] = (uint8_t )(eq & tmp[0]);
  }
  return tmp[0];
}
+
/*
 * Constant-time comparison of `len` bytes.  Seeds the accumulator with 0xff,
 * folds every byte pair through Hacl_Policies_cmp_bytes_, and returns the
 * bitwise complement: 0x00 when the buffers are equal, 0xff otherwise.
 */
uint8_t Hacl_Policies_cmp_bytes(uint8_t *b1, uint8_t *b2, uint32_t len)
{
  uint8_t acc[1] = { (uint8_t )255 };
  uint8_t folded = Hacl_Policies_cmp_bytes_(b1, b2, len, acc);
  return (uint8_t )~folded;
}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/Hacl_Policies.h b/sw/airborne/modules/datalink/hacl-c/Hacl_Policies.h
new file mode 100644
index 0000000000..46c819a203
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Hacl_Policies.h
@@ -0,0 +1,21 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __Hacl_Policies_H
+#define __Hacl_Policies_H
+
+
+
+
+
+uint8_t Hacl_Policies_declassify_u8(uint8_t x);
+
+uint32_t Hacl_Policies_declassify_u32(uint32_t x);
+
+uint64_t Hacl_Policies_declassify_u64(uint64_t x);
+
+FStar_UInt128_t Hacl_Policies_declassify_u128(FStar_UInt128_t x);
+
+uint8_t Hacl_Policies_cmp_bytes_(uint8_t *b1, uint8_t *b2, uint32_t len, uint8_t *tmp);
+
+uint8_t Hacl_Policies_cmp_bytes(uint8_t *b1, uint8_t *b2, uint32_t len);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/NaCl.c b/sw/airborne/modules/datalink/hacl-c/NaCl.c
new file mode 100644
index 0000000000..d65b27abdc
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/NaCl.c
@@ -0,0 +1,439 @@
+#include "NaCl.h"
+
/* Zero the 32-byte region at the start of `b` (the NaCl zero-padding of a
 * message/ciphertext buffer, or a 32-byte derived key being wiped). */
static void Hacl_SecretBox_ZeroPad_set_zero_bytes(uint8_t *b)
{
  for (uint32_t i = (uint32_t )0; i < (uint32_t )32; ++i)
    b[i] = (uint8_t )0;
}
+
/*
 * Secret-key authenticated encryption (XSalsa20 + Poly1305), detached MAC,
 * NaCl zero-padding layout: `m` carries 32 zero bytes before the mlen-byte
 * plaintext; `c` receives mlen + 32 bytes whose first 32 are re-zeroed before
 * returning.  Always returns 0.
 */
static uint32_t
Hacl_SecretBox_ZeroPad_crypto_secretbox_detached(
  uint8_t *c,
  uint8_t *mac,
  uint8_t *m,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *k1
)
{
  /* NOTE(review): mlen is truncated to 32 bits here, so messages must stay
   * below 2^32 - 32 bytes. */
  uint32_t mlen_ = (uint32_t )mlen;
  uint8_t subkey[32] = { 0 };
  /* HSalsa20(key, first 16 nonce bytes) -> XSalsa20 subkey. */
  Salsa20_hsalsa20(subkey, k1, n1);
  /* Encrypt padding + plaintext with the remaining 8 nonce bytes. */
  Salsa20_salsa20(c, m, mlen_ + (uint32_t )32, subkey, n1 + (uint32_t )16, (uint64_t )0);
  /* Poly1305 over the ciphertext body, keyed with c[0..31] — the raw
   * keystream, assuming m[0..31] are zero per the convention above. */
  Poly1305_64_crypto_onetimeauth(mac, c + (uint32_t )32, mlen, c);
  /* Re-zero the padding and wipe the derived key. */
  Hacl_SecretBox_ZeroPad_set_zero_bytes(c);
  Hacl_SecretBox_ZeroPad_set_zero_bytes(subkey);
  return (uint32_t )0;
}

/*
 * Second half of detached open: decrypt only when `verify` is 0 (tags equal,
 * as produced by Hacl_Policies_cmp_bytes).  Returns 0 on success and
 * 0xffffffff when verification failed (m is left untouched in that case).
 */
static uint32_t
Hacl_SecretBox_ZeroPad_crypto_secretbox_open_detached_decrypt(
  uint8_t *m,
  uint8_t *c,
  uint64_t clen,
  uint8_t *n1,
  uint8_t *subkey,
  uint8_t verify
)
{
  uint32_t clen_ = (uint32_t )clen;   /* same 32-bit length truncation as seal */
  if (verify == (uint8_t )0)
  {
    Salsa20_salsa20(m, c, clen_ + (uint32_t )32, subkey, n1 + (uint32_t )16, (uint64_t )0);
    Hacl_SecretBox_ZeroPad_set_zero_bytes(subkey);
    /* Restore the 32-byte zero padding at the front of the plaintext. */
    Hacl_SecretBox_ZeroPad_set_zero_bytes(m);
    return (uint32_t )0;
  }
  else
    return (uint32_t )0xffffffff;
}

/*
 * Detached open: recompute the Poly1305 tag of c[32..32+clen) and decrypt
 * only if it matches `mac` (constant-time compare).  Scratch layout in tmp:
 * subkey[0..31] | MAC key[32..63] | zero block[64..95] | computed tag[96..111].
 */
static uint32_t
Hacl_SecretBox_ZeroPad_crypto_secretbox_open_detached(
  uint8_t *m,
  uint8_t *c,
  uint8_t *mac,
  uint64_t clen,
  uint8_t *n1,
  uint8_t *k1
)
{
  uint8_t tmp[112] = { 0 };
  uint8_t *subkey = tmp;
  uint8_t *mackey = tmp + (uint32_t )32;
  uint8_t *mackey_ = tmp + (uint32_t )64;
  uint8_t *cmac = tmp + (uint32_t )96;
  Salsa20_hsalsa20(subkey, k1, n1);
  /* Encrypting 32 zero bytes yields the first 32 keystream bytes = MAC key. */
  Salsa20_salsa20(mackey, mackey_, (uint32_t )32, subkey, n1 + (uint32_t )16, (uint64_t )0);
  Poly1305_64_crypto_onetimeauth(cmac, c + (uint32_t )32, clen, mackey);
  /* 0x00 when the tags are equal, 0xff otherwise (constant time). */
  uint8_t result = Hacl_Policies_cmp_bytes(mac, cmac, (uint32_t )16);
  uint8_t verify = result;
  uint32_t
  z =
    Hacl_SecretBox_ZeroPad_crypto_secretbox_open_detached_decrypt(m,
      c,
      clen,
      n1,
      subkey,
      verify);
  return z;
}
+
/*
 * Combined-mode seal: run the detached seal into `c`, then copy the 16-byte
 * MAC into c[16..31] (c[0..15] remain the zero padding).  Returns 0.
 */
static uint32_t
Hacl_SecretBox_ZeroPad_crypto_secretbox_easy(
  uint8_t *c,
  uint8_t *m,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *k1
)
{
  uint8_t cmac[16] = { 0 };
  uint32_t res = Hacl_SecretBox_ZeroPad_crypto_secretbox_detached(c, cmac, m, mlen, n1, k1);
  memcpy(c + (uint32_t )16, cmac, (uint32_t )16 * sizeof cmac[0]);
  return res;
}
+
/*
 * Combined-mode open.  Hacl_SecretBox_ZeroPad_crypto_secretbox_easy stores
 * the 16-byte MAC at c + 16 (bytes 0..15 of c are the zero padding), so the
 * tag must be read from c + 16.  The previous code passed `c` itself, which
 * compared the zero padding against the recomputed MAC and made verification
 * fail for every ciphertext produced by crypto_secretbox_easy.  This also
 * matches Hacl_Box_ZeroPad_crypto_box_open_easy below.
 */
static uint32_t
Hacl_SecretBox_ZeroPad_crypto_secretbox_open_easy(
  uint8_t *m,
  uint8_t *c,
  uint64_t clen,
  uint8_t *n1,
  uint8_t *k1
)
{
  uint8_t *mac = c + (uint32_t )16;
  return Hacl_SecretBox_ZeroPad_crypto_secretbox_open_detached(m, c, mac, clen, n1, k1);
}
+
/*
 * Precompute the shared box key: X25519(sk, pk) fed through HSalsa20 with a
 * zero nonce (hsalsa_n is the zero-initialized tail of tmp).  Returns 0.
 */
static uint32_t Hacl_Box_ZeroPad_crypto_box_beforenm(uint8_t *k1, uint8_t *pk, uint8_t *sk)
{
  uint8_t tmp[48] = { 0 };
  uint8_t *hsalsa_k = tmp;
  uint8_t *hsalsa_n = tmp + (uint32_t )32;
  Curve25519_crypto_scalarmult(hsalsa_k, sk, pk);
  Salsa20_hsalsa20(k1, hsalsa_k, hsalsa_n);
  return (uint32_t )0;
}

/* Detached box seal with a precomputed shared key: plain secretbox. */
static uint32_t
Hacl_Box_ZeroPad_crypto_box_detached_afternm(
  uint8_t *c,
  uint8_t *mac,
  uint8_t *m,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *k1
)
{
  return Hacl_SecretBox_ZeroPad_crypto_secretbox_detached(c, mac, m, mlen, n1, k1);
}

/*
 * One-shot detached box seal: derive the shared key (scalarmult + HSalsa20
 * with a zero nonce), then secretbox.  Scratch layout in key[]:
 * raw scalarmult output[0..31] | derived subkey[32..63] | zero nonce[64..79].
 */
static uint32_t
Hacl_Box_ZeroPad_crypto_box_detached(
  uint8_t *c,
  uint8_t *mac,
  uint8_t *m,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *pk,
  uint8_t *sk
)
{
  uint8_t key[80] = { 0 };
  uint8_t *k1 = key;
  uint8_t *subkey = key + (uint32_t )32;
  uint8_t *hsalsa_n = key + (uint32_t )64;
  Curve25519_crypto_scalarmult(k1, sk, pk);
  Salsa20_hsalsa20(subkey, k1, hsalsa_n);
  uint32_t z = Hacl_SecretBox_ZeroPad_crypto_secretbox_detached(c, mac, m, mlen, n1, subkey);
  return z;
}
+
/*
 * One-shot detached box open: derive the shared key exactly as the seal side
 * does, then run the detached secretbox open.  The parameter is named `mlen`
 * but it is the ciphertext-body length forwarded as `clen`.  Returns 0 on
 * success, 0xffffffff on MAC failure.
 */
static uint32_t
Hacl_Box_ZeroPad_crypto_box_open_detached(
  uint8_t *m,
  uint8_t *c,
  uint8_t *mac,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *pk,
  uint8_t *sk
)
{
  uint8_t key[80] = { 0 };
  uint8_t *k1 = key;
  uint8_t *subkey = key + (uint32_t )32;
  uint8_t *hsalsa_n = key + (uint32_t )64;
  Curve25519_crypto_scalarmult(k1, sk, pk);
  Salsa20_hsalsa20(subkey, k1, hsalsa_n);
  uint32_t
  z = Hacl_SecretBox_ZeroPad_crypto_secretbox_open_detached(m, c, mac, mlen, n1, subkey);
  return z;
}
+
/* Combined-mode seal with precomputed key: detached seal, then place the
 * 16-byte MAC at c[16..31]. */
static uint32_t
Hacl_Box_ZeroPad_crypto_box_easy_afternm(
  uint8_t *c,
  uint8_t *m,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *k1
)
{
  uint8_t cmac[16] = { 0 };
  uint32_t z = Hacl_Box_ZeroPad_crypto_box_detached_afternm(c, cmac, m, mlen, n1, k1);
  memcpy(c + (uint32_t )16, cmac, (uint32_t )16 * sizeof cmac[0]);
  return z;
}

/* One-shot combined-mode seal: detached box seal, MAC copied to c[16..31]. */
static uint32_t
Hacl_Box_ZeroPad_crypto_box_easy(
  uint8_t *c,
  uint8_t *m,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *pk,
  uint8_t *sk
)
{
  uint8_t cmac[16] = { 0 };
  uint32_t res = Hacl_Box_ZeroPad_crypto_box_detached(c, cmac, m, mlen, n1, pk, sk);
  memcpy(c + (uint32_t )16, cmac, (uint32_t )16 * sizeof cmac[0]);
  return res;
}

/* Combined-mode open: the MAC is read from c + 16, matching where
 * crypto_box_easy stores it. */
static uint32_t
Hacl_Box_ZeroPad_crypto_box_open_easy(
  uint8_t *m,
  uint8_t *c,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *pk,
  uint8_t *sk
)
{
  uint8_t *mac = c + (uint32_t )16;
  return Hacl_Box_ZeroPad_crypto_box_open_detached(m, c, mac, mlen, n1, pk, sk);
}

/* Detached open with precomputed key: plain secretbox open. */
static uint32_t
Hacl_Box_ZeroPad_crypto_box_open_detached_afternm(
  uint8_t *m,
  uint8_t *c,
  uint8_t *mac,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *k1
)
{
  return Hacl_SecretBox_ZeroPad_crypto_secretbox_open_detached(m, c, mac, mlen, n1, k1);
}
+
/*
 * Combined-mode open with a precomputed key.
 * Hacl_Box_ZeroPad_crypto_box_easy_afternm stores the 16-byte MAC at c + 16
 * (c[0..15] are the zero padding), so the tag must be read from c + 16.  The
 * previous code passed `c` itself, which compared the zero padding against
 * the recomputed MAC and made verification fail for every ciphertext produced
 * by crypto_box_easy_afternm.  This also matches crypto_box_open_easy above.
 */
static uint32_t
Hacl_Box_ZeroPad_crypto_box_open_easy_afternm(
  uint8_t *m,
  uint8_t *c,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *k1
)
{
  uint8_t *mac = c + (uint32_t )16;
  uint32_t t = Hacl_Box_ZeroPad_crypto_box_open_detached_afternm(m, c, mac, mlen, n1, k1);
  return t;
}
+
/* Constant declarations extracted from the F* spec.  Prims_int has no native
 * C value here; these are emitted as uninitialized tentative definitions and
 * are never read by the code in this file. */
Prims_int NaCl_crypto_box_NONCEBYTES;

Prims_int NaCl_crypto_box_PUBLICKEYBYTES;

Prims_int NaCl_crypto_box_SECRETKEYBYTES;

Prims_int NaCl_crypto_box_MACBYTES;

Prims_int NaCl_crypto_secretbox_NONCEBYTES;

Prims_int NaCl_crypto_secretbox_KEYBYTES;

Prims_int NaCl_crypto_secretbox_MACBYTES;

/* Public NaCl-style API: thin wrappers re-exporting the static
 * implementations above. */

/* Detached secretbox seal; see Hacl_SecretBox_ZeroPad_crypto_secretbox_detached. */
uint32_t
NaCl_crypto_secretbox_detached(
  uint8_t *c,
  uint8_t *mac,
  uint8_t *m,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *k1
)
{
  return Hacl_SecretBox_ZeroPad_crypto_secretbox_detached(c, mac, m, mlen, n1, k1);
}

/* Detached secretbox open; 0 on success, 0xffffffff on MAC failure. */
uint32_t
NaCl_crypto_secretbox_open_detached(
  uint8_t *m,
  uint8_t *c,
  uint8_t *mac,
  uint64_t clen,
  uint8_t *n1,
  uint8_t *k1
)
{
  return Hacl_SecretBox_ZeroPad_crypto_secretbox_open_detached(m, c, mac, clen, n1, k1);
}

/* Combined-mode secretbox seal (MAC embedded at c[16..31]). */
uint32_t
NaCl_crypto_secretbox_easy(uint8_t *c, uint8_t *m, uint64_t mlen, uint8_t *n1, uint8_t *k1)
{
  return Hacl_SecretBox_ZeroPad_crypto_secretbox_easy(c, m, mlen, n1, k1);
}

/* Combined-mode secretbox open. */
uint32_t
NaCl_crypto_secretbox_open_easy(
  uint8_t *m,
  uint8_t *c,
  uint64_t clen,
  uint8_t *n1,
  uint8_t *k1
)
{
  return Hacl_SecretBox_ZeroPad_crypto_secretbox_open_easy(m, c, clen, n1, k1);
}
+
/* Precompute a shared box key from (pk, sk); see crypto_box_beforenm above. */
uint32_t NaCl_crypto_box_beforenm(uint8_t *k1, uint8_t *pk, uint8_t *sk)
{
  return Hacl_Box_ZeroPad_crypto_box_beforenm(k1, pk, sk);
}

/* Detached box seal with precomputed key. */
uint32_t
NaCl_crypto_box_detached_afternm(
  uint8_t *c,
  uint8_t *mac,
  uint8_t *m,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *k1
)
{
  return Hacl_Box_ZeroPad_crypto_box_detached_afternm(c, mac, m, mlen, n1, k1);
}

/* One-shot detached box seal. */
uint32_t
NaCl_crypto_box_detached(
  uint8_t *c,
  uint8_t *mac,
  uint8_t *m,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *pk,
  uint8_t *sk
)
{
  return Hacl_Box_ZeroPad_crypto_box_detached(c, mac, m, mlen, n1, pk, sk);
}

/* One-shot detached box open; 0 on success, 0xffffffff on MAC failure. */
uint32_t
NaCl_crypto_box_open_detached(
  uint8_t *m,
  uint8_t *c,
  uint8_t *mac,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *pk,
  uint8_t *sk
)
{
  return Hacl_Box_ZeroPad_crypto_box_open_detached(m, c, mac, mlen, n1, pk, sk);
}

/* Combined-mode box seal with precomputed key (MAC embedded at c[16..31]). */
uint32_t
NaCl_crypto_box_easy_afternm(uint8_t *c, uint8_t *m, uint64_t mlen, uint8_t *n1, uint8_t *k1)
{
  return Hacl_Box_ZeroPad_crypto_box_easy_afternm(c, m, mlen, n1, k1);
}

/* One-shot combined-mode box seal. */
uint32_t
NaCl_crypto_box_easy(
  uint8_t *c,
  uint8_t *m,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *pk,
  uint8_t *sk
)
{
  return Hacl_Box_ZeroPad_crypto_box_easy(c, m, mlen, n1, pk, sk);
}

/* One-shot combined-mode box open. */
uint32_t
NaCl_crypto_box_open_easy(
  uint8_t *m,
  uint8_t *c,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *pk,
  uint8_t *sk
)
{
  return Hacl_Box_ZeroPad_crypto_box_open_easy(m, c, mlen, n1, pk, sk);
}

/* Detached box open with precomputed key. */
uint32_t
NaCl_crypto_box_open_detached_afternm(
  uint8_t *m,
  uint8_t *c,
  uint8_t *mac,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *k1
)
{
  return Hacl_Box_ZeroPad_crypto_box_open_detached_afternm(m, c, mac, mlen, n1, k1);
}

/* Combined-mode box open with precomputed key. */
uint32_t
NaCl_crypto_box_open_easy_afternm(
  uint8_t *m,
  uint8_t *c,
  uint64_t mlen,
  uint8_t *n1,
  uint8_t *k1
)
{
  return Hacl_Box_ZeroPad_crypto_box_open_easy_afternm(m, c, mlen, n1, k1);
}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/NaCl.h b/sw/airborne/modules/datalink/hacl-c/NaCl.h
new file mode 100644
index 0000000000..69f5b37c56
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/NaCl.h
@@ -0,0 +1,134 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __NaCl_H
+#define __NaCl_H
+
+
+#include "Curve25519.h"
+#include "Salsa20.h"
+#include "Poly1305_64.h"
+#include "Hacl_Policies.h"
+
+
+extern Prims_int NaCl_crypto_box_NONCEBYTES;
+
+extern Prims_int NaCl_crypto_box_PUBLICKEYBYTES;
+
+extern Prims_int NaCl_crypto_box_SECRETKEYBYTES;
+
+extern Prims_int NaCl_crypto_box_MACBYTES;
+
+extern Prims_int NaCl_crypto_secretbox_NONCEBYTES;
+
+extern Prims_int NaCl_crypto_secretbox_KEYBYTES;
+
+extern Prims_int NaCl_crypto_secretbox_MACBYTES;
+
+uint32_t
+NaCl_crypto_secretbox_detached(
+ uint8_t *c,
+ uint8_t *mac,
+ uint8_t *m,
+ uint64_t mlen,
+ uint8_t *n1,
+ uint8_t *k1
+);
+
+uint32_t
+NaCl_crypto_secretbox_open_detached(
+ uint8_t *m,
+ uint8_t *c,
+ uint8_t *mac,
+ uint64_t clen,
+ uint8_t *n1,
+ uint8_t *k1
+);
+
+uint32_t
+NaCl_crypto_secretbox_easy(uint8_t *c, uint8_t *m, uint64_t mlen, uint8_t *n1, uint8_t *k1);
+
+uint32_t
+NaCl_crypto_secretbox_open_easy(
+ uint8_t *m,
+ uint8_t *c,
+ uint64_t clen,
+ uint8_t *n1,
+ uint8_t *k1
+);
+
+uint32_t NaCl_crypto_box_beforenm(uint8_t *k1, uint8_t *pk, uint8_t *sk);
+
+uint32_t
+NaCl_crypto_box_detached_afternm(
+ uint8_t *c,
+ uint8_t *mac,
+ uint8_t *m,
+ uint64_t mlen,
+ uint8_t *n1,
+ uint8_t *k1
+);
+
+uint32_t
+NaCl_crypto_box_detached(
+ uint8_t *c,
+ uint8_t *mac,
+ uint8_t *m,
+ uint64_t mlen,
+ uint8_t *n1,
+ uint8_t *pk,
+ uint8_t *sk
+);
+
+uint32_t
+NaCl_crypto_box_open_detached(
+ uint8_t *m,
+ uint8_t *c,
+ uint8_t *mac,
+ uint64_t mlen,
+ uint8_t *n1,
+ uint8_t *pk,
+ uint8_t *sk
+);
+
+uint32_t
+NaCl_crypto_box_easy_afternm(uint8_t *c, uint8_t *m, uint64_t mlen, uint8_t *n1, uint8_t *k1);
+
+uint32_t
+NaCl_crypto_box_easy(
+ uint8_t *c,
+ uint8_t *m,
+ uint64_t mlen,
+ uint8_t *n1,
+ uint8_t *pk,
+ uint8_t *sk
+);
+
+uint32_t
+NaCl_crypto_box_open_easy(
+ uint8_t *m,
+ uint8_t *c,
+ uint64_t mlen,
+ uint8_t *n1,
+ uint8_t *pk,
+ uint8_t *sk
+);
+
+uint32_t
+NaCl_crypto_box_open_detached_afternm(
+ uint8_t *m,
+ uint8_t *c,
+ uint8_t *mac,
+ uint64_t mlen,
+ uint8_t *n1,
+ uint8_t *k1
+);
+
+uint32_t
+NaCl_crypto_box_open_easy_afternm(
+ uint8_t *m,
+ uint8_t *c,
+ uint64_t mlen,
+ uint8_t *n1,
+ uint8_t *k1
+);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/Poly1305_64.c b/sw/airborne/modules/datalink/hacl-c/Poly1305_64.c
new file mode 100644
index 0000000000..0702bffffd
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Poly1305_64.c
@@ -0,0 +1,518 @@
+#include "Poly1305_64.h"
+
/* Multiply the lowest limb by 20: the original expresses this as
 * (b0 << 4) + (b0 << 2); 20 = 2^2 * 5 is the factor picked up by a limb that
 * wraps past 2^130 in the 44/44/42-bit representation mod 2^130 - 5. */
inline static void Hacl_Bignum_Modulo_reduce(uint64_t *b)
{
  uint64_t lo = b[0];
  b[0] = lo * (uint64_t )20;
}
+
/* Fold the overflow of the top limb back into the bottom limb: keep the low
 * 42 bits of b[2] and add 5 * (b[2] >> 42) to b[0], using
 * 2^130 = 5 (mod 2^130 - 5).  5*x is the original (x << 2) + x. */
inline static void Hacl_Bignum_Modulo_carry_top(uint64_t *b)
{
  uint64_t top = b[2];
  uint64_t low = b[0];
  uint64_t carry = top >> (uint32_t )42;
  b[2] = top & (uint64_t )0x3ffffffffff;
  b[0] = low + carry * (uint64_t )5;
}
+
/*
 * Wide (128-bit limb) variant of carry_top: keep the low 42 bits of limb 2
 * and fold the overflow into limb 0 multiplied by 5 ((x << 2) + x), using
 * 2^130 = 5 (mod 2^130 - 5).
 */
inline static void Hacl_Bignum_Modulo_carry_top_wide(FStar_UInt128_t *b)
{
  FStar_UInt128_t b2 = b[2];
  FStar_UInt128_t b0 = b[0];
  FStar_UInt128_t
  b2_ = FStar_UInt128_logand(b2, FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0x3ffffffffff));
  uint64_t
  b2_42 = FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(b2, (uint32_t )42));
  FStar_UInt128_t
  b0_ =
    FStar_UInt128_add(b0,
      FStar_Int_Cast_Full_uint64_to_uint128((b2_42 << (uint32_t )2) + b2_42));
  b[2] = b2_;
  b[0] = b0_;
}
+
+inline static void
+Hacl_Bignum_Fproduct_copy_from_wide_(uint64_t *output, FStar_UInt128_t *input)
+{
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )3; i = i + (uint32_t )1)
+ {
+ FStar_UInt128_t uu____429 = input[i];
+ uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
+ output[i] = uu____428;
+ }
+}
+
/* Rotate the three limbs right by one position: [a, b, c] -> [c, a, b].
 * Combined with Modulo_reduce this implements multiplication by 2^44. */
inline static void Hacl_Bignum_Fproduct_shift(uint64_t *output)
{
  uint64_t top = output[2];
  output[2] = output[1];
  output[1] = output[0];
  output[0] = top;
}
+
/*
 * output[i] += input[i] * s for the three limbs, accumulating 64x64->128-bit
 * products into the wide limbs (wrapping add; carries are propagated later
 * by carry_wide_).
 */
inline static void
Hacl_Bignum_Fproduct_sum_scalar_multiplication_(
  FStar_UInt128_t *output,
  uint64_t *input,
  uint64_t s
)
{
  for (uint32_t i = (uint32_t )0; i < (uint32_t )3; i = i + (uint32_t )1)
  {
    FStar_UInt128_t uu____871 = output[i];
    uint64_t uu____874 = input[i];
    FStar_UInt128_t
    uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
    output[i] = uu____870;
  }
}

/*
 * Propagate carries through the first two wide limbs: keep 44 bits per limb
 * and add the overflow into the next limb.  The top limb keeps its excess
 * until Hacl_Bignum_Modulo_carry_top_wide folds it back mod 2^130 - 5.
 */
inline static void Hacl_Bignum_Fproduct_carry_wide_(FStar_UInt128_t *tmp)
{
  for (uint32_t i = (uint32_t )0; i < (uint32_t )2; i = i + (uint32_t )1)
  {
    uint32_t ctr = i;
    FStar_UInt128_t tctr = tmp[ctr];
    FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t )1];
    uint64_t
    r0 =
      FStar_Int_Cast_Full_uint128_to_uint64(tctr)
      & (((uint64_t )1 << (uint32_t )44) - (uint64_t )1);
    FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t )44);
    tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
    tmp[ctr + (uint32_t )1] = FStar_UInt128_add(tctrp1, c);
  }
}
+
/* 64-bit variant of the carry pass: limbs 0 and 1 are reduced to 44 bits and
 * their overflow is added to the next limb; limb 2 keeps its carry for
 * Hacl_Bignum_Modulo_carry_top. */
inline static void Hacl_Bignum_Fproduct_carry_limb_(uint64_t *tmp)
{
  for (uint32_t j = (uint32_t )0; j < (uint32_t )2; ++j)
  {
    uint64_t limb = tmp[j];
    tmp[j] = limb & (((uint64_t )1 << (uint32_t )44) - (uint64_t )1);
    tmp[j + (uint32_t )1] += limb >> (uint32_t )44;
  }
}
+
/*
 * Multiply the multiplicand by 2^44 in place: rotate the limbs up one slot
 * and scale the wrapped-around limb by 20 = 2^132 mod (2^130 - 5).
 */
inline static void Hacl_Bignum_Fmul_shift_reduce(uint64_t *output)
{
  Hacl_Bignum_Fproduct_shift(output);
  Hacl_Bignum_Modulo_reduce(output);
}

/*
 * Schoolbook multiplication into the wide accumulator:
 * output += sum_i input2[i] * (input * 2^(44*i)), shifting `input` between
 * iterations (it is clobbered); the final iteration skips the shift.
 */
static void
Hacl_Bignum_Fmul_mul_shift_reduce_(FStar_UInt128_t *output, uint64_t *input, uint64_t *input2)
{
  for (uint32_t i = (uint32_t )0; i < (uint32_t )2; i = i + (uint32_t )1)
  {
    uint64_t input2i = input2[i];
    Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
    Hacl_Bignum_Fmul_shift_reduce(input);
  }
  uint32_t i = (uint32_t )2;
  uint64_t input2i = input2[i];
  Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
}
+
/*
 * output = input * input2 mod 2^130 - 5.  Multiplies into a 3-limb wide
 * accumulator, carries, folds the top overflow, narrows to 64-bit limbs, and
 * runs one extra 44-bit carry between limbs 0 and 1.  Clobbers `input`
 * (used as the shifting multiplicand).
 */
inline static void Hacl_Bignum_Fmul_fmul_(uint64_t *output, uint64_t *input, uint64_t *input2)
{
  KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0), (uint32_t )3);
  FStar_UInt128_t t[3];
  for (uintmax_t _i = 0; _i < (uint32_t )3; ++_i)
    t[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0);
  Hacl_Bignum_Fmul_mul_shift_reduce_(t, input, input2);
  Hacl_Bignum_Fproduct_carry_wide_(t);
  Hacl_Bignum_Modulo_carry_top_wide(t);
  Hacl_Bignum_Fproduct_copy_from_wide_(output, t);
  uint64_t i0 = output[0];
  uint64_t i1 = output[1];
  uint64_t i0_ = i0 & (((uint64_t )1 << (uint32_t )44) - (uint64_t )1);
  uint64_t i1_ = i1 + (i0 >> (uint32_t )44);
  output[0] = i0_;
  output[1] = i1_;
}

/*
 * Non-destructive field multiplication: copies `input` into a scratch buffer
 * so fmul_ may clobber it; this also allows output to alias input.
 */
inline static void Hacl_Bignum_Fmul_fmul(uint64_t *output, uint64_t *input, uint64_t *input2)
{
  uint64_t tmp[3] = { 0 };
  memcpy(tmp, input, (uint32_t )3 * sizeof input[0]);
  Hacl_Bignum_Fmul_fmul_(output, tmp, input2);
}
+
/* Poly1305 inner step: acc = (acc + block) * r mod 2^130 - 5.  The limb-wise
 * additions rely on the limbs having enough headroom not to overflow 64 bits. */
inline static void
Hacl_Bignum_AddAndMultiply_add_and_multiply(uint64_t *acc, uint64_t *block, uint64_t *r)
{
  for (uint32_t k = (uint32_t )0; k < (uint32_t )3; ++k)
    acc[k] += block[k];
  Hacl_Bignum_Fmul_fmul(acc, acc, r);
}
+
/*
 * Absorb one full 16-byte message block:
 * acc = (acc + block + 2^128) * r mod 2^130 - 5.
 */
inline static void
Hacl_Impl_Poly1305_64_poly1305_update(
  Hacl_Impl_Poly1305_64_State_poly1305_state st,
  uint8_t *m
)
{
  Hacl_Impl_Poly1305_64_State_poly1305_state scrut0 = st;
  uint64_t *h = scrut0.h;
  uint64_t *acc = h;
  Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
  uint64_t *r = scrut.r;
  uint64_t *r3 = r;
  uint64_t tmp[3] = { 0 };
  /* Load the block little-endian and split it into 44/44/40-bit limbs. */
  FStar_UInt128_t m0 = load128_le(m);
  uint64_t r0 = FStar_Int_Cast_Full_uint128_to_uint64(m0) & (uint64_t )0xfffffffffff;
  uint64_t
  r1 =
    FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(m0, (uint32_t )44))
    & (uint64_t )0xfffffffffff;
  uint64_t
  r2 = FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(m0, (uint32_t )88));
  tmp[0] = r0;
  tmp[1] = r1;
  tmp[2] = r2;
  uint64_t b2 = tmp[2];
  /* Set the 2^128 marker bit Poly1305 adds to every full block
   * (bit 40 of the top limb has weight 2^(88+40) = 2^128). */
  uint64_t b2_ = (uint64_t )0x10000000000 | b2;
  tmp[2] = b2_;
  Hacl_Bignum_AddAndMultiply_add_and_multiply(acc, tmp, r3);
}
+
/*
 * Absorb an already-padded final block: like poly1305_update but WITHOUT the
 * implicit 2^128 bit — for a partial block the 0x01 terminator is part of the
 * padded data instead.  The `m` and `rem_` parameters are unused here (kept
 * for the erased F* specification arguments).
 */
inline static void
Hacl_Impl_Poly1305_64_poly1305_process_last_block_(
  uint8_t *block,
  Hacl_Impl_Poly1305_64_State_poly1305_state st,
  uint8_t *m,
  uint64_t rem_
)
{
  uint64_t tmp[3] = { 0 };
  FStar_UInt128_t m0 = load128_le(block);
  uint64_t r0 = FStar_Int_Cast_Full_uint128_to_uint64(m0) & (uint64_t )0xfffffffffff;
  uint64_t
  r1 =
    FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(m0, (uint32_t )44))
    & (uint64_t )0xfffffffffff;
  uint64_t
  r2 = FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(m0, (uint32_t )88));
  tmp[0] = r0;
  tmp[1] = r1;
  tmp[2] = r2;
  Hacl_Impl_Poly1305_64_State_poly1305_state scrut0 = st;
  uint64_t *h = scrut0.h;
  Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
  uint64_t *r = scrut.r;
  Hacl_Bignum_AddAndMultiply_add_and_multiply(h, tmp, r);
}

/*
 * Pad the trailing rem_ bytes (1 <= rem_ <= 15) into a zeroed 16-byte block,
 * append the 0x01 terminator byte, and absorb it.
 */
inline static void
Hacl_Impl_Poly1305_64_poly1305_process_last_block(
  Hacl_Impl_Poly1305_64_State_poly1305_state st,
  uint8_t *m,
  uint64_t rem_
)
{
  uint8_t zero1 = (uint8_t )0;
  KRML_CHECK_SIZE(zero1, (uint32_t )16);
  uint8_t block[16];
  for (uintmax_t _i = 0; _i < (uint32_t )16; ++_i)
    block[_i] = zero1;
  uint32_t i0 = (uint32_t )rem_;
  uint32_t i = (uint32_t )rem_;
  memcpy(block, m, i * sizeof m[0]);
  block[i0] = (uint8_t )1;
  Hacl_Impl_Poly1305_64_poly1305_process_last_block_(block, st, m, rem_);
}
+
/*
 * Final reduction of the accumulator to its canonical value in
 * [0, 2^130 - 5): full carry pass, two top-limb folds, one extra 44-bit
 * carry, then a constant-time conditional subtraction of
 * p = 2^130 - 5 (limbs 0xffffffffffb / 0xfffffffffff / 0x3ffffffffff)
 * using comparison masks instead of a branch.
 */
static void Hacl_Impl_Poly1305_64_poly1305_last_pass(uint64_t *acc)
{
  Hacl_Bignum_Fproduct_carry_limb_(acc);
  Hacl_Bignum_Modulo_carry_top(acc);
  uint64_t a0 = acc[0];
  uint64_t a10 = acc[1];
  uint64_t a20 = acc[2];
  uint64_t a0_ = a0 & (uint64_t )0xfffffffffff;
  uint64_t r0 = a0 >> (uint32_t )44;
  uint64_t a1_ = (a10 + r0) & (uint64_t )0xfffffffffff;
  uint64_t r1 = (a10 + r0) >> (uint32_t )44;
  uint64_t a2_ = a20 + r1;
  acc[0] = a0_;
  acc[1] = a1_;
  acc[2] = a2_;
  Hacl_Bignum_Modulo_carry_top(acc);
  uint64_t i0 = acc[0];
  uint64_t i1 = acc[1];
  uint64_t i0_ = i0 & (((uint64_t )1 << (uint32_t )44) - (uint64_t )1);
  uint64_t i1_ = i1 + (i0 >> (uint32_t )44);
  acc[0] = i0_;
  acc[1] = i1_;
  uint64_t a00 = acc[0];
  uint64_t a1 = acc[1];
  uint64_t a2 = acc[2];
  /* mask is all-ones exactly when acc >= p: top two limbs saturated and the
   * low limb >= p's low limb. */
  uint64_t mask0 = FStar_UInt64_gte_mask(a00, (uint64_t )0xffffffffffb);
  uint64_t mask1 = FStar_UInt64_eq_mask(a1, (uint64_t )0xfffffffffff);
  uint64_t mask2 = FStar_UInt64_eq_mask(a2, (uint64_t )0x3ffffffffff);
  uint64_t mask = mask0 & mask1 & mask2;
  uint64_t a0_0 = a00 - ((uint64_t )0xffffffffffb & mask);
  uint64_t a1_0 = a1 - ((uint64_t )0xfffffffffff & mask);
  uint64_t a2_0 = a2 - ((uint64_t )0x3ffffffffff & mask);
  acc[0] = a0_0;
  acc[1] = a1_0;
  acc[2] = a2_0;
}

/* Bundle the r (clamped key) and h (accumulator) limb arrays into a state. */
static Hacl_Impl_Poly1305_64_State_poly1305_state
Hacl_Impl_Poly1305_64_mk_state(uint64_t *r, uint64_t *h)
{
  return ((Hacl_Impl_Poly1305_64_State_poly1305_state ){ .r = r, .h = h });
}
+
+static void
+Hacl_Standalone_Poly1305_64_poly1305_blocks(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint64_t len1
+)
+{
+ if (len1 == (uint64_t )0)
+ {
+
+ }
+ else
+ {
+ uint8_t *block = m;
+ uint8_t *tail1 = m + (uint32_t )16;
+ Hacl_Impl_Poly1305_64_poly1305_update(st, block);
+ uint64_t len2 = len1 - (uint64_t )1;
+ Hacl_Standalone_Poly1305_64_poly1305_blocks(st, tail1, len2);
+ }
+}
+
/*
 * Initialise the state from the first 16 key bytes and absorb len1 full
 * blocks: clamp r per Poly1305 (mask 0x0ffffffc0ffffffc_0ffffffc0fffffff),
 * split it into 44/44/40-bit limbs, zero the accumulator, then run the block
 * loop.
 */
static void
Hacl_Standalone_Poly1305_64_poly1305_partial(
  Hacl_Impl_Poly1305_64_State_poly1305_state st,
  uint8_t *input,
  uint64_t len1,
  uint8_t *kr
)
{
  Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
  uint64_t *r = scrut.r;
  uint64_t *x0 = r;
  FStar_UInt128_t k1 = load128_le(kr);
  FStar_UInt128_t
  k_clamped =
    FStar_UInt128_logand(k1,
      FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0x0ffffffc0ffffffc),
          (uint32_t )64),
        FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0x0ffffffc0fffffff)));
  uint64_t r0 = FStar_Int_Cast_Full_uint128_to_uint64(k_clamped) & (uint64_t )0xfffffffffff;
  uint64_t
  r1 =
    FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(k_clamped, (uint32_t )44))
    & (uint64_t )0xfffffffffff;
  uint64_t
  r2 = FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(k_clamped, (uint32_t )88));
  x0[0] = r0;
  x0[1] = r1;
  x0[2] = r2;
  Hacl_Impl_Poly1305_64_State_poly1305_state scrut0 = st;
  uint64_t *h = scrut0.h;
  uint64_t *x00 = h;
  x00[0] = (uint64_t )0;
  x00[1] = (uint64_t )0;
  x00[2] = (uint64_t )0;
  Hacl_Standalone_Poly1305_64_poly1305_blocks(st, input, len1);
}

/*
 * Full MAC pass over an arbitrary-length message: split len1 into 16-byte
 * blocks plus a remainder, absorb the blocks, pad-and-absorb the remainder
 * (if any), then canonicalise the accumulator.
 */
static void
Hacl_Standalone_Poly1305_64_poly1305_complete(
  Hacl_Impl_Poly1305_64_State_poly1305_state st,
  uint8_t *m,
  uint64_t len1,
  uint8_t *k1
)
{
  uint8_t *kr = k1;
  uint64_t len16 = len1 >> (uint32_t )4;
  uint64_t rem16 = len1 & (uint64_t )0xf;
  uint8_t *part_input = m;
  uint8_t *last_block = m + (uint32_t )((uint64_t )16 * len16);
  Hacl_Standalone_Poly1305_64_poly1305_partial(st, part_input, len16, kr);
  if (rem16 == (uint64_t )0)
  {

  }
  else
    Hacl_Impl_Poly1305_64_poly1305_process_last_block(st, last_block, rem16);
  Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
  uint64_t *h = scrut.h;
  uint64_t *acc = h;
  Hacl_Impl_Poly1305_64_poly1305_last_pass(acc);
}
+
/*
 * One-shot Poly1305: k1 is the 32-byte key (first 16 bytes = r, last 16 = s).
 * Runs the complete pass, repacks the 44/44/42-bit accumulator into a 128-bit
 * value, adds s mod 2^128, and stores the 16-byte tag little-endian.
 */
static void
Hacl_Standalone_Poly1305_64_crypto_onetimeauth_(
  uint8_t *output,
  uint8_t *input,
  uint64_t len1,
  uint8_t *k1
)
{
  /* buf holds both halves of the state: r limbs [0..2], h limbs [3..5]. */
  uint64_t buf[6] = { 0 };
  uint64_t *r = buf;
  uint64_t *h = buf + (uint32_t )3;
  Hacl_Impl_Poly1305_64_State_poly1305_state st = Hacl_Impl_Poly1305_64_mk_state(r, h);
  uint8_t *key_s = k1 + (uint32_t )16;
  Hacl_Standalone_Poly1305_64_poly1305_complete(st, input, len1, k1);
  Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
  uint64_t *h3 = scrut.h;
  uint64_t *acc = h3;
  FStar_UInt128_t k_ = load128_le(key_s);
  uint64_t h0 = acc[0];
  uint64_t h1 = acc[1];
  uint64_t h2 = acc[2];
  /* Repack limbs (44/44/42 bits) into a single 128-bit integer. */
  FStar_UInt128_t
  acc_ =
    FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128(h2
          << (uint32_t )24
          | h1 >> (uint32_t )20),
        (uint32_t )64),
      FStar_Int_Cast_Full_uint64_to_uint128(h1 << (uint32_t )44 | h0));
  FStar_UInt128_t mac_ = FStar_UInt128_add_mod(acc_, k_);
  store128_le(output, mac_);
}

/* Thin indirection kept from the F* module structure. */
static void
Hacl_Standalone_Poly1305_64_crypto_onetimeauth(
  uint8_t *output,
  uint8_t *input,
  uint64_t len1,
  uint8_t *k1
)
{
  Hacl_Standalone_Poly1305_64_crypto_onetimeauth_(output, input, len1, k1);
}

/* Erased F* ghost accessor: returns a dummy value, never used at runtime. */
void *Poly1305_64_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *b)
{
  return (void *)(uint8_t )0;
}

/* Public constructor for an incremental Poly1305 state (r limbs, acc limbs). */
Hacl_Impl_Poly1305_64_State_poly1305_state Poly1305_64_mk_state(uint64_t *r, uint64_t *acc)
{
  return Hacl_Impl_Poly1305_64_mk_state(r, acc);
}
+
/*
 * Initialise an incremental Poly1305 state from the first 16 key bytes:
 * clamp r (mask 0x0ffffffc0ffffffc_0ffffffc0fffffff), split it into
 * 44/44/40-bit limbs, and zero the accumulator.
 */
void Poly1305_64_init(Hacl_Impl_Poly1305_64_State_poly1305_state st, uint8_t *k1)
{
  Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
  uint64_t *r = scrut.r;
  uint64_t *x0 = r;
  FStar_UInt128_t k10 = load128_le(k1);
  FStar_UInt128_t
  k_clamped =
    FStar_UInt128_logand(k10,
      FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0x0ffffffc0ffffffc),
          (uint32_t )64),
        FStar_Int_Cast_Full_uint64_to_uint128((uint64_t )0x0ffffffc0fffffff)));
  uint64_t r0 = FStar_Int_Cast_Full_uint128_to_uint64(k_clamped) & (uint64_t )0xfffffffffff;
  uint64_t
  r1 =
    FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(k_clamped, (uint32_t )44))
    & (uint64_t )0xfffffffffff;
  uint64_t
  r2 = FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(k_clamped, (uint32_t )88));
  x0[0] = r0;
  x0[1] = r1;
  x0[2] = r2;
  Hacl_Impl_Poly1305_64_State_poly1305_state scrut0 = st;
  uint64_t *h = scrut0.h;
  uint64_t *x00 = h;
  x00[0] = (uint64_t )0;
  x00[1] = (uint64_t )0;
  x00[2] = (uint64_t )0;
}

/* Erased F* ghost log; never inspected at runtime. */
void *Poly1305_64_empty_log = (void *)(uint8_t )0;

/* Absorb one full 16-byte block into the incremental state. */
void Poly1305_64_update_block(Hacl_Impl_Poly1305_64_State_poly1305_state st, uint8_t *m)
{
  Hacl_Impl_Poly1305_64_poly1305_update(st, m);
}
+
+void
+Poly1305_64_update(Hacl_Impl_Poly1305_64_State_poly1305_state st, uint8_t *m, uint32_t len1)
+{
+ if (len1 == (uint32_t )0)
+ {
+
+ }
+ else
+ {
+ uint8_t *block = m;
+ uint8_t *m_ = m + (uint32_t )16;
+ uint32_t len2 = len1 - (uint32_t )1;
+ Poly1305_64_update_block(st, block);
+ Poly1305_64_update(st, m_, len2);
+ }
+}
+
/*
 * Absorb the final partial block of len1 bytes (0 <= len1 < 16; zero means
 * nothing left to absorb), then canonicalise the accumulator mod 2^130 - 5.
 */
void
Poly1305_64_update_last(
  Hacl_Impl_Poly1305_64_State_poly1305_state st,
  uint8_t *m,
  uint32_t len1
)
{
  if ((uint64_t )len1 == (uint64_t )0)
  {

  }
  else
    Hacl_Impl_Poly1305_64_poly1305_process_last_block(st, m, (uint64_t )len1);
  Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
  uint64_t *h = scrut.h;
  uint64_t *acc = h;
  Hacl_Impl_Poly1305_64_poly1305_last_pass(acc);
}

/*
 * Produce the 16-byte tag: repack the 44/44/42-bit accumulator into 128 bits,
 * add the 16-byte `s` half of the key (k1) mod 2^128, store little-endian.
 * Call only after Poly1305_64_update_last has canonicalised the accumulator.
 */
void
Poly1305_64_finish(Hacl_Impl_Poly1305_64_State_poly1305_state st, uint8_t *mac, uint8_t *k1)
{
  Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
  uint64_t *h = scrut.h;
  uint64_t *acc = h;
  FStar_UInt128_t k_ = load128_le(k1);
  uint64_t h0 = acc[0];
  uint64_t h1 = acc[1];
  uint64_t h2 = acc[2];
  FStar_UInt128_t
  acc_ =
    FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128(h2
          << (uint32_t )24
          | h1 >> (uint32_t )20),
        (uint32_t )64),
      FStar_Int_Cast_Full_uint64_to_uint128(h1 << (uint32_t )44 | h0));
  FStar_UInt128_t mac_ = FStar_UInt128_add_mod(acc_, k_);
  store128_le(mac, mac_);
}

/* Public one-shot Poly1305 MAC: 16-byte tag of input under the 32-byte key. */
void
Poly1305_64_crypto_onetimeauth(uint8_t *output, uint8_t *input, uint64_t len1, uint8_t *k1)
{
  Hacl_Standalone_Poly1305_64_crypto_onetimeauth(output, input, len1, k1);
}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/Poly1305_64.h b/sw/airborne/modules/datalink/hacl-c/Poly1305_64.h
new file mode 100644
index 0000000000..5d46a57c1e
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Poly1305_64.h
@@ -0,0 +1,84 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __Poly1305_64_H
+#define __Poly1305_64_H
+
+
+
+#include "testlib.h"
+
+typedef uint64_t Hacl_Bignum_Constants_limb;
+
+typedef FStar_UInt128_t Hacl_Bignum_Constants_wide;
+
+typedef FStar_UInt128_t Hacl_Bignum_Wide_t;
+
+typedef uint64_t Hacl_Bignum_Limb_t;
+
+typedef void *Hacl_Impl_Poly1305_64_State_log_t;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_State_uint8_p;
+
+typedef uint64_t *Hacl_Impl_Poly1305_64_State_bigint;
+
+typedef void *Hacl_Impl_Poly1305_64_State_seqelem;
+
+typedef uint64_t *Hacl_Impl_Poly1305_64_State_elemB;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_State_wordB;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_State_wordB_16;
+
+typedef struct
+{
+ uint64_t *r;
+ uint64_t *h;
+}
+Hacl_Impl_Poly1305_64_State_poly1305_state;
+
+typedef void *Hacl_Impl_Poly1305_64_log_t;
+
+typedef uint64_t *Hacl_Impl_Poly1305_64_bigint;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_uint8_p;
+
+typedef uint64_t *Hacl_Impl_Poly1305_64_elemB;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_wordB;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_wordB_16;
+
+typedef uint8_t *Poly1305_64_uint8_p;
+
+typedef uint64_t Poly1305_64_uint64_t;
+
+void *Poly1305_64_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *b);
+
+typedef uint8_t *Poly1305_64_key;
+
+typedef Hacl_Impl_Poly1305_64_State_poly1305_state Poly1305_64_state;
+
+Hacl_Impl_Poly1305_64_State_poly1305_state Poly1305_64_mk_state(uint64_t *r, uint64_t *acc);
+
+void Poly1305_64_init(Hacl_Impl_Poly1305_64_State_poly1305_state st, uint8_t *k1);
+
+extern void *Poly1305_64_empty_log;
+
+void Poly1305_64_update_block(Hacl_Impl_Poly1305_64_State_poly1305_state st, uint8_t *m);
+
+void
+Poly1305_64_update(Hacl_Impl_Poly1305_64_State_poly1305_state st, uint8_t *m, uint32_t len1);
+
+void
+Poly1305_64_update_last(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint32_t len1
+);
+
+void
+Poly1305_64_finish(Hacl_Impl_Poly1305_64_State_poly1305_state st, uint8_t *mac, uint8_t *k1);
+
+void
+Poly1305_64_crypto_onetimeauth(uint8_t *output, uint8_t *input, uint64_t len1, uint8_t *k1);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/README.md b/sw/airborne/modules/datalink/hacl-c/README.md
new file mode 100644
index 0000000000..e32f08e433
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/README.md
@@ -0,0 +1,145 @@
+HACL*
+=====
+
+HACL* is a formally verified cryptographic library in [F\*],
+developed by the [Prosecco](http://prosecco.inria.fr) team at
+[INRIA Paris](https://www.inria.fr/en/centre/paris) in collaboration
+with Microsoft Research, as part of [Project Everest].
+
+HACL stands for High-Assurance Cryptographic Library and its design is
+inspired by discussions at the [HACS series of workshops](https://github.com/HACS-workshop).
+The goal of this library is to develop verified C reference implementations
+for popular cryptographic primitives and to verify them for memory safety,
+functional correctness, and secret independence.
+
+More details about the HACL* library and its design can be found in our ACM CCS 2017 research paper:
+https://eprint.iacr.org/2017/536
+
+All our code is written and verified in [F\*] and then compiled to C via
+the [KreMLin tool](https://github.com/FStarLang/kremlin/). Details on the verification and compilation
+toolchain and their formal guarantees can be found in the ICFP 2017 paper:
+https://arxiv.org/abs/1703.00053
+
+# Supported Cryptographic Algorithms
+
+The primitives and constructions supported currently are:
+
+* Stream ciphers: Chacha20, Salsa20, XSalsa20
+* MACs: Poly1305, HMAC
+* Elliptic Curves: Curve25519
+* Elliptic Curves Signatures: Ed25519
+* Hash functions: SHA2 (256,384,512)
+* NaCl API: secret_box, box, sign
+* TLS API: IETF Chacha20Poly1305 AEAD
+
+Developers can use HACL* through the [NaCl API].
+In particular, we implement the same C API as [libsodium] for the
+NaCl constructions, so any application that relies on
+libsodium only for these constructions can be immediately ported to use the verified code in HACL*
+instead. (Warning: libsodium also implements other algorithms not in NaCl
+that are not implemented by HACL*)
+
+The verified primitives can also be used to support larger F* verification projects.
+For example, HACL* code is used through the agile cryptographic model developed in
+[secure_api/] as the basis for cryptographic proofs of the TLS record layer in [miTLS].
+A detailed description of the code in [secure_api/] and its formal security guarantees
+appears in the IEEE S&P 2017 paper: https://eprint.iacr.org/2016/1178.pdf
+
+[F\*]: https://github.com/FStarLang/FStar
+[KreMLin]: https://github.com/FStarLang/kremlin
+[miTLS]: https://github.com/mitls/mitls-fstar
+[NaCl API]: https://nacl.cr.yp.to
+[libsodium]: https://github.com/jedisct1/libsodium
+[Project Everest]: https://github.com/project-everest
+[secure_api/]: https://github.com/mitls/hacl-star/tree/master/secure_api
+
+# Warning
+
+This library is at the pre-production stage.
+Please consult the authors before using it in production systems.
+
+The first release is to be expected around the time of [ACM CCS].
+Any feedback is welcome in the meantime.
+
+[ACM CCS]: https://www.sigsac.org/ccs/CCS2017/
+
+# Licenses
+
+All F* source code is released under Apache 2.0
+
+All generated C code is released under MIT
+
+
+# Installation
+
+See [INSTALL.md](INSTALL.md) for prerequisites.
+
+For convenience, C code for our verified primitives has already been extracted
+and is available in [snapshots/hacl-c](snapshots/hacl-c).
+To build the library, you need a modern C compiler (preferably GCC-7).
+
+[INSTALL.md]: https://github.com/mitls/hacl-star/INSTALL.md
+
+
+# Verifying and Building HACL*
+
+Type `make` to get more information:
+```
+HACL* Makefile:
+If you want to run and test the C library:
+- 'make build' will generate a shared library from the hacl-c snapshot (no verification)
+- 'make unit-tests' will run tests on the library built from the hacl-c snapshot (no verification)
+- 'make clean-build' will clean 'build' artifacts
+
+If you want to verify the F* code and regenerate the C library:
+- 'make prepare' will try to install F* and Kremlin (still has some prerequisites)
+- 'make verify' will run F* verification on all specs, code and secure-api directories
+- 'make extract' will generate all the C code into a snapshot and test it (no verification)
+- 'make test-all' will generate and test everything (no verification)
+- 'make world' will run everything (except make prepare)
+- 'make clean' will remove all artifacts created by other targets
+```
+
+Verification and C code generation requires [F\*] and [KreMLin].
+Benchmarking performance in `test-all` requires [openssl] and [libsodium].
+An additional CMake build is available and can be run with `make build-cmake`.
+
+
+# Performance
+
+To measure the performance of HACL* primitives on your platform and C compiler,
+run the targets from `test/Makefile` if you have the dependencies installed. (experimental)
+To compare its performance with the C reference code (not the assembly versions) in [libsodium] and [openssl],
+download and compile [libsodium] with the `--disable-asm` flag and [openssl] with the `-no-asm` flag.
+
+While HACL* is typically as fast as hand-written C code, it is typically 1.1-5.7x slower than
+assembly code in our experiments. In the future, we hope to close this gap by using verified assembly implementations
+like [Vale](https://github.com/project-everest/vale) for some primitives.
+
+[openssl]: https://github.com/openssl/openssl
+[libsodium]: https://github.com/jedisct1/libsodium
+
+
+# Experimental features
+
+The [code/experimental](code/experimental) directory includes other (partially verified) cryptographic primitives that will become part of the library in the near future:
+* Encryption: AES-128, AES-256
+* MACs: GCM
+* Key Derivation: HKDF
+* Signatures: RSA-PSS
+
+We are also working on a JavaScript backend for F* that would enable us to extract HACL* as a JavaScript library.
+
+
+# Authors and Maintainers
+
+HACL* was originally developed as part of the Ph.D. thesis of Jean Karim Zinzindohoué
+in the [Prosecco](http://prosecco.inria.fr) team at [INRIA Paris](https://www.inria.fr/en/centre/paris).
+It contains contributions from many researchers at INRIA and Microsoft Research, and is
+being actively developed and maintained within [Project Everest].
+
+For questions and comments, or if you want to contribute to the project, do contact the current maintainers at:
+* Benjamin Beurdouche (benjamin.beurdouche@inria.fr)
+* Karthikeyan Bhargavan (karthikeyan.bhargavan@inria.fr)
+
+
diff --git a/sw/airborne/modules/datalink/hacl-c/SHA2_256.c b/sw/airborne/modules/datalink/hacl-c/SHA2_256.c
new file mode 100644
index 0000000000..2a34f021c0
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/SHA2_256.c
@@ -0,0 +1,311 @@
+#include "SHA2_256.h"
+
+static void
+Hacl_Hash_Lib_LoadStore_uint32s_from_be_bytes(uint32_t *output, uint8_t *input, uint32_t len)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint8_t *x0 = input + (uint32_t )4 * i;
+ uint32_t inputi = load32_be(x0);
+ output[i] = inputi;
+ }
+}
+
+static void
+Hacl_Hash_Lib_LoadStore_uint32s_to_be_bytes(uint8_t *output, uint32_t *input, uint32_t len)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint32_t hd1 = input[i];
+ uint8_t *x0 = output + (uint32_t )4 * i;
+ store32_be(x0, hd1);
+ }
+}
+
+static void Hacl_Hash_SHA2_256_init(uint32_t *state)
+{
+ (void )(state + (uint32_t )136);
+ uint32_t *k1 = state;
+ uint32_t *h_01 = state + (uint32_t )128;
+ uint32_t *p10 = k1;
+ uint32_t *p20 = k1 + (uint32_t )16;
+ uint32_t *p3 = k1 + (uint32_t )32;
+ uint32_t *p4 = k1 + (uint32_t )48;
+ uint32_t *p11 = p10;
+ uint32_t *p21 = p10 + (uint32_t )8;
+ uint32_t *p12 = p11;
+ uint32_t *p22 = p11 + (uint32_t )4;
+ p12[0] = (uint32_t )0x428a2f98;
+ p12[1] = (uint32_t )0x71374491;
+ p12[2] = (uint32_t )0xb5c0fbcf;
+ p12[3] = (uint32_t )0xe9b5dba5;
+ p22[0] = (uint32_t )0x3956c25b;
+ p22[1] = (uint32_t )0x59f111f1;
+ p22[2] = (uint32_t )0x923f82a4;
+ p22[3] = (uint32_t )0xab1c5ed5;
+ uint32_t *p13 = p21;
+ uint32_t *p23 = p21 + (uint32_t )4;
+ p13[0] = (uint32_t )0xd807aa98;
+ p13[1] = (uint32_t )0x12835b01;
+ p13[2] = (uint32_t )0x243185be;
+ p13[3] = (uint32_t )0x550c7dc3;
+ p23[0] = (uint32_t )0x72be5d74;
+ p23[1] = (uint32_t )0x80deb1fe;
+ p23[2] = (uint32_t )0x9bdc06a7;
+ p23[3] = (uint32_t )0xc19bf174;
+ uint32_t *p14 = p20;
+ uint32_t *p24 = p20 + (uint32_t )8;
+ uint32_t *p15 = p14;
+ uint32_t *p25 = p14 + (uint32_t )4;
+ p15[0] = (uint32_t )0xe49b69c1;
+ p15[1] = (uint32_t )0xefbe4786;
+ p15[2] = (uint32_t )0x0fc19dc6;
+ p15[3] = (uint32_t )0x240ca1cc;
+ p25[0] = (uint32_t )0x2de92c6f;
+ p25[1] = (uint32_t )0x4a7484aa;
+ p25[2] = (uint32_t )0x5cb0a9dc;
+ p25[3] = (uint32_t )0x76f988da;
+ uint32_t *p16 = p24;
+ uint32_t *p26 = p24 + (uint32_t )4;
+ p16[0] = (uint32_t )0x983e5152;
+ p16[1] = (uint32_t )0xa831c66d;
+ p16[2] = (uint32_t )0xb00327c8;
+ p16[3] = (uint32_t )0xbf597fc7;
+ p26[0] = (uint32_t )0xc6e00bf3;
+ p26[1] = (uint32_t )0xd5a79147;
+ p26[2] = (uint32_t )0x06ca6351;
+ p26[3] = (uint32_t )0x14292967;
+ uint32_t *p17 = p3;
+ uint32_t *p27 = p3 + (uint32_t )8;
+ uint32_t *p18 = p17;
+ uint32_t *p28 = p17 + (uint32_t )4;
+ p18[0] = (uint32_t )0x27b70a85;
+ p18[1] = (uint32_t )0x2e1b2138;
+ p18[2] = (uint32_t )0x4d2c6dfc;
+ p18[3] = (uint32_t )0x53380d13;
+ p28[0] = (uint32_t )0x650a7354;
+ p28[1] = (uint32_t )0x766a0abb;
+ p28[2] = (uint32_t )0x81c2c92e;
+ p28[3] = (uint32_t )0x92722c85;
+ uint32_t *p19 = p27;
+ uint32_t *p29 = p27 + (uint32_t )4;
+ p19[0] = (uint32_t )0xa2bfe8a1;
+ p19[1] = (uint32_t )0xa81a664b;
+ p19[2] = (uint32_t )0xc24b8b70;
+ p19[3] = (uint32_t )0xc76c51a3;
+ p29[0] = (uint32_t )0xd192e819;
+ p29[1] = (uint32_t )0xd6990624;
+ p29[2] = (uint32_t )0xf40e3585;
+ p29[3] = (uint32_t )0x106aa070;
+ uint32_t *p110 = p4;
+ uint32_t *p210 = p4 + (uint32_t )8;
+ uint32_t *p1 = p110;
+ uint32_t *p211 = p110 + (uint32_t )4;
+ p1[0] = (uint32_t )0x19a4c116;
+ p1[1] = (uint32_t )0x1e376c08;
+ p1[2] = (uint32_t )0x2748774c;
+ p1[3] = (uint32_t )0x34b0bcb5;
+ p211[0] = (uint32_t )0x391c0cb3;
+ p211[1] = (uint32_t )0x4ed8aa4a;
+ p211[2] = (uint32_t )0x5b9cca4f;
+ p211[3] = (uint32_t )0x682e6ff3;
+ uint32_t *p111 = p210;
+ uint32_t *p212 = p210 + (uint32_t )4;
+ p111[0] = (uint32_t )0x748f82ee;
+ p111[1] = (uint32_t )0x78a5636f;
+ p111[2] = (uint32_t )0x84c87814;
+ p111[3] = (uint32_t )0x8cc70208;
+ p212[0] = (uint32_t )0x90befffa;
+ p212[1] = (uint32_t )0xa4506ceb;
+ p212[2] = (uint32_t )0xbef9a3f7;
+ p212[3] = (uint32_t )0xc67178f2;
+ uint32_t *p112 = h_01;
+ uint32_t *p2 = h_01 + (uint32_t )4;
+ p112[0] = (uint32_t )0x6a09e667;
+ p112[1] = (uint32_t )0xbb67ae85;
+ p112[2] = (uint32_t )0x3c6ef372;
+ p112[3] = (uint32_t )0xa54ff53a;
+ p2[0] = (uint32_t )0x510e527f;
+ p2[1] = (uint32_t )0x9b05688c;
+ p2[2] = (uint32_t )0x1f83d9ab;
+ p2[3] = (uint32_t )0x5be0cd19;
+}
+
+static void Hacl_Hash_SHA2_256_update(uint32_t *state, uint8_t *data)
+{
+ uint32_t data_w[16] = { 0 };
+ Hacl_Hash_Lib_LoadStore_uint32s_from_be_bytes(data_w, data, (uint32_t )16);
+ uint32_t *hash_w = state + (uint32_t )128;
+ uint32_t *ws_w = state + (uint32_t )64;
+ uint32_t *k_w = state;
+ uint32_t *counter_w = state + (uint32_t )136;
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )16; i = i + (uint32_t )1)
+ {
+ uint32_t uu____206 = data_w[i];
+ ws_w[i] = uu____206;
+ }
+ for (uint32_t i = (uint32_t )16; i < (uint32_t )64; i = i + (uint32_t )1)
+ {
+ uint32_t t16 = ws_w[i - (uint32_t )16];
+ uint32_t t15 = ws_w[i - (uint32_t )15];
+ uint32_t t7 = ws_w[i - (uint32_t )7];
+ uint32_t t2 = ws_w[i - (uint32_t )2];
+ ws_w[i] =
+ ((t2 >> (uint32_t )17 | t2 << (uint32_t )32 - (uint32_t )17)
+ ^ (t2 >> (uint32_t )19 | t2 << (uint32_t )32 - (uint32_t )19) ^ t2 >> (uint32_t )10)
+ +
+ t7
+ +
+ ((t15 >> (uint32_t )7 | t15 << (uint32_t )32 - (uint32_t )7)
+ ^ (t15 >> (uint32_t )18 | t15 << (uint32_t )32 - (uint32_t )18) ^ t15 >> (uint32_t )3)
+ + t16;
+ }
+ uint32_t hash_0[8] = { 0 };
+ memcpy(hash_0, hash_w, (uint32_t )8 * sizeof hash_w[0]);
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )64; i = i + (uint32_t )1)
+ {
+ uint32_t a = hash_0[0];
+ uint32_t b = hash_0[1];
+ uint32_t c = hash_0[2];
+ uint32_t d = hash_0[3];
+ uint32_t e = hash_0[4];
+ uint32_t f1 = hash_0[5];
+ uint32_t g = hash_0[6];
+ uint32_t h = hash_0[7];
+ uint32_t kt = k_w[i];
+ uint32_t wst = ws_w[i];
+ uint32_t
+ t1 =
+ h
+ +
+ ((e >> (uint32_t )6 | e << (uint32_t )32 - (uint32_t )6)
+ ^
+ (e >> (uint32_t )11 | e << (uint32_t )32 - (uint32_t )11)
+ ^ (e >> (uint32_t )25 | e << (uint32_t )32 - (uint32_t )25))
+ + (e & f1 ^ ~e & g)
+ + kt
+ + wst;
+ uint32_t
+ t2 =
+ ((a >> (uint32_t )2 | a << (uint32_t )32 - (uint32_t )2)
+ ^
+ (a >> (uint32_t )13 | a << (uint32_t )32 - (uint32_t )13)
+ ^ (a >> (uint32_t )22 | a << (uint32_t )32 - (uint32_t )22))
+ + (a & b ^ a & c ^ b & c);
+ uint32_t x1 = t1 + t2;
+ uint32_t x5 = d + t1;
+ uint32_t *p1 = hash_0;
+ uint32_t *p2 = hash_0 + (uint32_t )4;
+ p1[0] = x1;
+ p1[1] = a;
+ p1[2] = b;
+ p1[3] = c;
+ p2[0] = x5;
+ p2[1] = e;
+ p2[2] = f1;
+ p2[3] = g;
+ }
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )8; i = i + (uint32_t )1)
+ {
+ uint32_t uu____871 = hash_w[i];
+ uint32_t uu____874 = hash_0[i];
+ uint32_t uu____870 = uu____871 + uu____874;
+ hash_w[i] = uu____870;
+ }
+ uint32_t c0 = counter_w[0];
+ uint32_t one1 = (uint32_t )1;
+ counter_w[0] = c0 + one1;
+}
+
+static void Hacl_Hash_SHA2_256_update_multi(uint32_t *state, uint8_t *data, uint32_t n1)
+{
+ for (uint32_t i = (uint32_t )0; i < n1; i = i + (uint32_t )1)
+ {
+ uint8_t *b = data + i * (uint32_t )64;
+ Hacl_Hash_SHA2_256_update(state, b);
+ }
+}
+
+static void Hacl_Hash_SHA2_256_update_last(uint32_t *state, uint8_t *data, uint32_t len)
+{
+ uint8_t blocks[128] = { 0 };
+ K___uint32_t_uint8_t_ uu____1925;
+ if (len < (uint32_t )56)
+ uu____1925 = ((K___uint32_t_uint8_t_ ){ .fst = (uint32_t )1, .snd = blocks + (uint32_t )64 });
+ else
+ uu____1925 = ((K___uint32_t_uint8_t_ ){ .fst = (uint32_t )2, .snd = blocks });
+ K___uint32_t_uint8_t_ scrut = uu____1925;
+ uint32_t nb = scrut.fst;
+ uint8_t *final_blocks = scrut.snd;
+ memcpy(final_blocks, data, len * sizeof data[0]);
+ uint32_t n1 = state[136];
+ uint8_t *padding = final_blocks + len;
+ uint32_t
+ pad0len = ((uint32_t )64 - (len + (uint32_t )8 + (uint32_t )1) % (uint32_t )64) % (uint32_t )64;
+ uint8_t *buf1 = padding;
+ (void )(padding + (uint32_t )1);
+ uint8_t *buf2 = padding + (uint32_t )1 + pad0len;
+ uint64_t
+ encodedlen =
+ ((uint64_t )n1 * (uint64_t )(uint32_t )64 + (uint64_t )len)
+ * (uint64_t )(uint32_t )8;
+ buf1[0] = (uint8_t )0x80;
+ store64_be(buf2, encodedlen);
+ Hacl_Hash_SHA2_256_update_multi(state, final_blocks, nb);
+}
+
+static void Hacl_Hash_SHA2_256_finish(uint32_t *state, uint8_t *hash1)
+{
+ uint32_t *hash_w = state + (uint32_t )128;
+ Hacl_Hash_Lib_LoadStore_uint32s_to_be_bytes(hash1, hash_w, (uint32_t )8);
+}
+
+static void Hacl_Hash_SHA2_256_hash(uint8_t *hash1, uint8_t *input, uint32_t len)
+{
+ uint32_t state[137] = { 0 };
+ uint32_t n1 = len / (uint32_t )64;
+ uint32_t r = len % (uint32_t )64;
+ uint8_t *input_blocks = input;
+ uint8_t *input_last = input + n1 * (uint32_t )64;
+ Hacl_Hash_SHA2_256_init(state);
+ Hacl_Hash_SHA2_256_update_multi(state, input_blocks, n1);
+ Hacl_Hash_SHA2_256_update_last(state, input_last, r);
+ Hacl_Hash_SHA2_256_finish(state, hash1);
+}
+
+uint32_t SHA2_256_size_hash = (uint32_t )32;
+
+uint32_t SHA2_256_size_block = (uint32_t )64;
+
+uint32_t SHA2_256_size_state = (uint32_t )137;
+
+void SHA2_256_init(uint32_t *state)
+{
+ Hacl_Hash_SHA2_256_init(state);
+}
+
+void SHA2_256_update(uint32_t *state, uint8_t *data_8)
+{
+ Hacl_Hash_SHA2_256_update(state, data_8);
+}
+
+void SHA2_256_update_multi(uint32_t *state, uint8_t *data, uint32_t n1)
+{
+ Hacl_Hash_SHA2_256_update_multi(state, data, n1);
+}
+
+void SHA2_256_update_last(uint32_t *state, uint8_t *data, uint32_t len)
+{
+ Hacl_Hash_SHA2_256_update_last(state, data, len);
+}
+
+void SHA2_256_finish(uint32_t *state, uint8_t *hash1)
+{
+ Hacl_Hash_SHA2_256_finish(state, hash1);
+}
+
+void SHA2_256_hash(uint8_t *hash1, uint8_t *input, uint32_t len)
+{
+ Hacl_Hash_SHA2_256_hash(hash1, input, len);
+}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/SHA2_256.h b/sw/airborne/modules/datalink/hacl-c/SHA2_256.h
new file mode 100644
index 0000000000..18587b8819
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/SHA2_256.h
@@ -0,0 +1,84 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __SHA2_256_H
+#define __SHA2_256_H
+
+
+
+#include "testlib.h"
+
+typedef uint8_t Hacl_Hash_Lib_Create_uint8_t;
+
+typedef uint32_t Hacl_Hash_Lib_Create_uint32_t;
+
+typedef uint64_t Hacl_Hash_Lib_Create_uint64_t;
+
+typedef uint8_t Hacl_Hash_Lib_Create_uint8_ht;
+
+typedef uint32_t Hacl_Hash_Lib_Create_uint32_ht;
+
+typedef uint64_t Hacl_Hash_Lib_Create_uint64_ht;
+
+typedef uint8_t *Hacl_Hash_Lib_Create_uint8_p;
+
+typedef uint32_t *Hacl_Hash_Lib_Create_uint32_p;
+
+typedef uint64_t *Hacl_Hash_Lib_Create_uint64_p;
+
+typedef uint8_t *Hacl_Hash_Lib_LoadStore_uint8_p;
+
+typedef uint8_t Hacl_Hash_SHA2_256_uint8_t;
+
+typedef uint32_t Hacl_Hash_SHA2_256_uint32_t;
+
+typedef uint64_t Hacl_Hash_SHA2_256_uint64_t;
+
+typedef uint8_t Hacl_Hash_SHA2_256_uint8_ht;
+
+typedef uint32_t Hacl_Hash_SHA2_256_uint32_ht;
+
+typedef uint64_t Hacl_Hash_SHA2_256_uint64_ht;
+
+typedef uint32_t *Hacl_Hash_SHA2_256_uint32_p;
+
+typedef uint8_t *Hacl_Hash_SHA2_256_uint8_p;
+
+typedef struct
+{
+ uint32_t fst;
+ uint8_t *snd;
+}
+K___uint32_t_uint8_t_;
+
+typedef uint8_t SHA2_256_uint8_t;
+
+typedef uint32_t SHA2_256_uint32_t;
+
+typedef uint64_t SHA2_256_uint64_t;
+
+typedef uint8_t SHA2_256_uint8_ht;
+
+typedef uint32_t SHA2_256_uint32_ht;
+
+typedef uint32_t *SHA2_256_uint32_p;
+
+typedef uint8_t *SHA2_256_uint8_p;
+
+extern uint32_t SHA2_256_size_hash;
+
+extern uint32_t SHA2_256_size_block;
+
+extern uint32_t SHA2_256_size_state;
+
+void SHA2_256_init(uint32_t *state);
+
+void SHA2_256_update(uint32_t *state, uint8_t *data_8);
+
+void SHA2_256_update_multi(uint32_t *state, uint8_t *data, uint32_t n1);
+
+void SHA2_256_update_last(uint32_t *state, uint8_t *data, uint32_t len);
+
+void SHA2_256_finish(uint32_t *state, uint8_t *hash1);
+
+void SHA2_256_hash(uint8_t *hash1, uint8_t *input, uint32_t len);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/SHA2_384.c b/sw/airborne/modules/datalink/hacl-c/SHA2_384.c
new file mode 100644
index 0000000000..df20c7e469
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/SHA2_384.c
@@ -0,0 +1,346 @@
+#include "SHA2_384.h"
+
+static void
+Hacl_Hash_Lib_LoadStore_uint64s_from_be_bytes(uint64_t *output, uint8_t *input, uint32_t len)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint8_t *x0 = input + (uint32_t )8 * i;
+ uint64_t inputi = load64_be(x0);
+ output[i] = inputi;
+ }
+}
+
+static void
+Hacl_Hash_Lib_LoadStore_uint64s_to_be_bytes(uint8_t *output, uint64_t *input, uint32_t len)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint64_t hd1 = input[i];
+ uint8_t *x0 = output + (uint32_t )8 * i;
+ store64_be(x0, hd1);
+ }
+}
+
+static void Hacl_Hash_SHA2_384_init(uint64_t *state)
+{
+ (void )(state + (uint32_t )168);
+ uint64_t *k1 = state;
+ uint64_t *h_01 = state + (uint32_t )160;
+ uint64_t *p10 = k1;
+ uint64_t *p20 = k1 + (uint32_t )16;
+ uint64_t *p3 = k1 + (uint32_t )32;
+ uint64_t *p4 = k1 + (uint32_t )48;
+ uint64_t *p5 = k1 + (uint32_t )64;
+ uint64_t *p11 = p10;
+ uint64_t *p21 = p10 + (uint32_t )8;
+ uint64_t *p12 = p11;
+ uint64_t *p22 = p11 + (uint32_t )4;
+ p12[0] = (uint64_t )0x428a2f98d728ae22;
+ p12[1] = (uint64_t )0x7137449123ef65cd;
+ p12[2] = (uint64_t )0xb5c0fbcfec4d3b2f;
+ p12[3] = (uint64_t )0xe9b5dba58189dbbc;
+ p22[0] = (uint64_t )0x3956c25bf348b538;
+ p22[1] = (uint64_t )0x59f111f1b605d019;
+ p22[2] = (uint64_t )0x923f82a4af194f9b;
+ p22[3] = (uint64_t )0xab1c5ed5da6d8118;
+ uint64_t *p13 = p21;
+ uint64_t *p23 = p21 + (uint32_t )4;
+ p13[0] = (uint64_t )0xd807aa98a3030242;
+ p13[1] = (uint64_t )0x12835b0145706fbe;
+ p13[2] = (uint64_t )0x243185be4ee4b28c;
+ p13[3] = (uint64_t )0x550c7dc3d5ffb4e2;
+ p23[0] = (uint64_t )0x72be5d74f27b896f;
+ p23[1] = (uint64_t )0x80deb1fe3b1696b1;
+ p23[2] = (uint64_t )0x9bdc06a725c71235;
+ p23[3] = (uint64_t )0xc19bf174cf692694;
+ uint64_t *p14 = p20;
+ uint64_t *p24 = p20 + (uint32_t )8;
+ uint64_t *p15 = p14;
+ uint64_t *p25 = p14 + (uint32_t )4;
+ p15[0] = (uint64_t )0xe49b69c19ef14ad2;
+ p15[1] = (uint64_t )0xefbe4786384f25e3;
+ p15[2] = (uint64_t )0x0fc19dc68b8cd5b5;
+ p15[3] = (uint64_t )0x240ca1cc77ac9c65;
+ p25[0] = (uint64_t )0x2de92c6f592b0275;
+ p25[1] = (uint64_t )0x4a7484aa6ea6e483;
+ p25[2] = (uint64_t )0x5cb0a9dcbd41fbd4;
+ p25[3] = (uint64_t )0x76f988da831153b5;
+ uint64_t *p16 = p24;
+ uint64_t *p26 = p24 + (uint32_t )4;
+ p16[0] = (uint64_t )0x983e5152ee66dfab;
+ p16[1] = (uint64_t )0xa831c66d2db43210;
+ p16[2] = (uint64_t )0xb00327c898fb213f;
+ p16[3] = (uint64_t )0xbf597fc7beef0ee4;
+ p26[0] = (uint64_t )0xc6e00bf33da88fc2;
+ p26[1] = (uint64_t )0xd5a79147930aa725;
+ p26[2] = (uint64_t )0x06ca6351e003826f;
+ p26[3] = (uint64_t )0x142929670a0e6e70;
+ uint64_t *p17 = p3;
+ uint64_t *p27 = p3 + (uint32_t )8;
+ uint64_t *p18 = p17;
+ uint64_t *p28 = p17 + (uint32_t )4;
+ p18[0] = (uint64_t )0x27b70a8546d22ffc;
+ p18[1] = (uint64_t )0x2e1b21385c26c926;
+ p18[2] = (uint64_t )0x4d2c6dfc5ac42aed;
+ p18[3] = (uint64_t )0x53380d139d95b3df;
+ p28[0] = (uint64_t )0x650a73548baf63de;
+ p28[1] = (uint64_t )0x766a0abb3c77b2a8;
+ p28[2] = (uint64_t )0x81c2c92e47edaee6;
+ p28[3] = (uint64_t )0x92722c851482353b;
+ uint64_t *p19 = p27;
+ uint64_t *p29 = p27 + (uint32_t )4;
+ p19[0] = (uint64_t )0xa2bfe8a14cf10364;
+ p19[1] = (uint64_t )0xa81a664bbc423001;
+ p19[2] = (uint64_t )0xc24b8b70d0f89791;
+ p19[3] = (uint64_t )0xc76c51a30654be30;
+ p29[0] = (uint64_t )0xd192e819d6ef5218;
+ p29[1] = (uint64_t )0xd69906245565a910;
+ p29[2] = (uint64_t )0xf40e35855771202a;
+ p29[3] = (uint64_t )0x106aa07032bbd1b8;
+ uint64_t *p110 = p4;
+ uint64_t *p210 = p4 + (uint32_t )8;
+ uint64_t *p111 = p110;
+ uint64_t *p211 = p110 + (uint32_t )4;
+ p111[0] = (uint64_t )0x19a4c116b8d2d0c8;
+ p111[1] = (uint64_t )0x1e376c085141ab53;
+ p111[2] = (uint64_t )0x2748774cdf8eeb99;
+ p111[3] = (uint64_t )0x34b0bcb5e19b48a8;
+ p211[0] = (uint64_t )0x391c0cb3c5c95a63;
+ p211[1] = (uint64_t )0x4ed8aa4ae3418acb;
+ p211[2] = (uint64_t )0x5b9cca4f7763e373;
+ p211[3] = (uint64_t )0x682e6ff3d6b2b8a3;
+ uint64_t *p112 = p210;
+ uint64_t *p212 = p210 + (uint32_t )4;
+ p112[0] = (uint64_t )0x748f82ee5defb2fc;
+ p112[1] = (uint64_t )0x78a5636f43172f60;
+ p112[2] = (uint64_t )0x84c87814a1f0ab72;
+ p112[3] = (uint64_t )0x8cc702081a6439ec;
+ p212[0] = (uint64_t )0x90befffa23631e28;
+ p212[1] = (uint64_t )0xa4506cebde82bde9;
+ p212[2] = (uint64_t )0xbef9a3f7b2c67915;
+ p212[3] = (uint64_t )0xc67178f2e372532b;
+ uint64_t *p113 = p5;
+ uint64_t *p213 = p5 + (uint32_t )8;
+ uint64_t *p1 = p113;
+ uint64_t *p214 = p113 + (uint32_t )4;
+ p1[0] = (uint64_t )0xca273eceea26619c;
+ p1[1] = (uint64_t )0xd186b8c721c0c207;
+ p1[2] = (uint64_t )0xeada7dd6cde0eb1e;
+ p1[3] = (uint64_t )0xf57d4f7fee6ed178;
+ p214[0] = (uint64_t )0x06f067aa72176fba;
+ p214[1] = (uint64_t )0x0a637dc5a2c898a6;
+ p214[2] = (uint64_t )0x113f9804bef90dae;
+ p214[3] = (uint64_t )0x1b710b35131c471b;
+ uint64_t *p114 = p213;
+ uint64_t *p215 = p213 + (uint32_t )4;
+ p114[0] = (uint64_t )0x28db77f523047d84;
+ p114[1] = (uint64_t )0x32caab7b40c72493;
+ p114[2] = (uint64_t )0x3c9ebe0a15c9bebc;
+ p114[3] = (uint64_t )0x431d67c49c100d4c;
+ p215[0] = (uint64_t )0x4cc5d4becb3e42b6;
+ p215[1] = (uint64_t )0x597f299cfc657e2a;
+ p215[2] = (uint64_t )0x5fcb6fab3ad6faec;
+ p215[3] = (uint64_t )0x6c44198c4a475817;
+ uint64_t *p115 = h_01;
+ uint64_t *p2 = h_01 + (uint32_t )4;
+ p115[0] = (uint64_t )0xcbbb9d5dc1059ed8;
+ p115[1] = (uint64_t )0x629a292a367cd507;
+ p115[2] = (uint64_t )0x9159015a3070dd17;
+ p115[3] = (uint64_t )0x152fecd8f70e5939;
+ p2[0] = (uint64_t )0x67332667ffc00b31;
+ p2[1] = (uint64_t )0x8eb44a8768581511;
+ p2[2] = (uint64_t )0xdb0c2e0d64f98fa7;
+ p2[3] = (uint64_t )0x47b5481dbefa4fa4;
+}
+
+static void Hacl_Hash_SHA2_384_update(uint64_t *state, uint8_t *data)
+{
+ KRML_CHECK_SIZE((uint64_t )(uint32_t )0, (uint32_t )16);
+ uint64_t data_w[16];
+ for (uintmax_t _i = 0; _i < (uint32_t )16; ++_i)
+ data_w[_i] = (uint64_t )(uint32_t )0;
+ Hacl_Hash_Lib_LoadStore_uint64s_from_be_bytes(data_w, data, (uint32_t )16);
+ uint64_t *hash_w = state + (uint32_t )160;
+ uint64_t *ws_w = state + (uint32_t )80;
+ uint64_t *k_w = state;
+ uint64_t *counter_w = state + (uint32_t )168;
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )16; i = i + (uint32_t )1)
+ {
+ uint64_t uu____242 = data_w[i];
+ ws_w[i] = uu____242;
+ }
+ for (uint32_t i = (uint32_t )16; i < (uint32_t )80; i = i + (uint32_t )1)
+ {
+ uint64_t t16 = ws_w[i - (uint32_t )16];
+ uint64_t t15 = ws_w[i - (uint32_t )15];
+ uint64_t t7 = ws_w[i - (uint32_t )7];
+ uint64_t t2 = ws_w[i - (uint32_t )2];
+ ws_w[i] =
+ ((t2 >> (uint32_t )19 | t2 << (uint32_t )64 - (uint32_t )19)
+ ^ (t2 >> (uint32_t )61 | t2 << (uint32_t )64 - (uint32_t )61) ^ t2 >> (uint32_t )6)
+ +
+ t7
+ +
+ ((t15 >> (uint32_t )1 | t15 << (uint32_t )64 - (uint32_t )1)
+ ^ (t15 >> (uint32_t )8 | t15 << (uint32_t )64 - (uint32_t )8) ^ t15 >> (uint32_t )7)
+ + t16;
+ }
+ uint64_t hash_0[8] = { 0 };
+ memcpy(hash_0, hash_w, (uint32_t )8 * sizeof hash_w[0]);
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )80; i = i + (uint32_t )1)
+ {
+ uint64_t a = hash_0[0];
+ uint64_t b = hash_0[1];
+ uint64_t c = hash_0[2];
+ uint64_t d = hash_0[3];
+ uint64_t e = hash_0[4];
+ uint64_t f1 = hash_0[5];
+ uint64_t g = hash_0[6];
+ uint64_t h = hash_0[7];
+ uint64_t k_t = k_w[i];
+ uint64_t ws_t = ws_w[i];
+ uint64_t
+ t1 =
+ h
+ +
+ ((e >> (uint32_t )14 | e << (uint32_t )64 - (uint32_t )14)
+ ^
+ (e >> (uint32_t )18 | e << (uint32_t )64 - (uint32_t )18)
+ ^ (e >> (uint32_t )41 | e << (uint32_t )64 - (uint32_t )41))
+ + (e & f1 ^ ~e & g)
+ + k_t
+ + ws_t;
+ uint64_t
+ t2 =
+ ((a >> (uint32_t )28 | a << (uint32_t )64 - (uint32_t )28)
+ ^
+ (a >> (uint32_t )34 | a << (uint32_t )64 - (uint32_t )34)
+ ^ (a >> (uint32_t )39 | a << (uint32_t )64 - (uint32_t )39))
+ + (a & b ^ a & c ^ b & c);
+ uint64_t x1 = t1 + t2;
+ uint64_t x5 = d + t1;
+ uint64_t *p1 = hash_0;
+ uint64_t *p2 = hash_0 + (uint32_t )4;
+ p1[0] = x1;
+ p1[1] = a;
+ p1[2] = b;
+ p1[3] = c;
+ p2[0] = x5;
+ p2[1] = e;
+ p2[2] = f1;
+ p2[3] = g;
+ }
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )8; i = i + (uint32_t )1)
+ {
+ uint64_t uu____871 = hash_w[i];
+ uint64_t uu____874 = hash_0[i];
+ uint64_t uu____870 = uu____871 + uu____874;
+ hash_w[i] = uu____870;
+ }
+ uint64_t c0 = counter_w[0];
+ uint64_t one1 = (uint64_t )(uint32_t )1;
+ counter_w[0] = c0 + one1;
+}
+
+static void Hacl_Hash_SHA2_384_update_multi(uint64_t *state, uint8_t *data, uint32_t n1)
+{
+ for (uint32_t i = (uint32_t )0; i < n1; i = i + (uint32_t )1)
+ {
+ uint8_t *b = data + i * (uint32_t )128;
+ Hacl_Hash_SHA2_384_update(state, b);
+ }
+}
+
+static void Hacl_Hash_SHA2_384_update_last(uint64_t *state, uint8_t *data, uint64_t len)
+{
+ uint8_t blocks[256] = { 0 };
+ uint32_t nb;
+ if (len < (uint64_t )112)
+ nb = (uint32_t )1;
+ else
+ nb = (uint32_t )2;
+ uint8_t *final_blocks;
+ if (len < (uint64_t )112)
+ final_blocks = blocks + (uint32_t )128;
+ else
+ final_blocks = blocks;
+ memcpy(final_blocks, data, (uint32_t )len * sizeof data[0]);
+ uint64_t n1 = state[168];
+ uint8_t *padding = final_blocks + (uint32_t )len;
+ FStar_UInt128_t
+ encodedlen =
+ FStar_UInt128_shift_left(FStar_UInt128_add(FStar_UInt128_mul_wide(n1,
+ (uint64_t )(uint32_t )128),
+ FStar_Int_Cast_Full_uint64_to_uint128(len)),
+ (uint32_t )3);
+ uint32_t
+ pad0len =
+ ((uint32_t )128 - ((uint32_t )len + (uint32_t )16 + (uint32_t )1) % (uint32_t )128)
+ % (uint32_t )128;
+ uint8_t *buf1 = padding;
+ (void )(padding + (uint32_t )1);
+ uint8_t *buf2 = padding + (uint32_t )1 + pad0len;
+ buf1[0] = (uint8_t )0x80;
+ store128_be(buf2, encodedlen);
+ Hacl_Hash_SHA2_384_update_multi(state, final_blocks, nb);
+}
+
+static void Hacl_Hash_SHA2_384_finish(uint64_t *state, uint8_t *hash1)
+{
+ uint64_t *hash_w = state + (uint32_t )160;
+ Hacl_Hash_Lib_LoadStore_uint64s_to_be_bytes(hash1, hash_w, (uint32_t )6);
+}
+
+static void Hacl_Hash_SHA2_384_hash(uint8_t *hash1, uint8_t *input, uint32_t len)
+{
+ KRML_CHECK_SIZE((uint64_t )(uint32_t )0, (uint32_t )169);
+ uint64_t state[169];
+ for (uintmax_t _i = 0; _i < (uint32_t )169; ++_i)
+ state[_i] = (uint64_t )(uint32_t )0;
+ uint32_t n1 = len / (uint32_t )128;
+ uint32_t r = len % (uint32_t )128;
+ uint8_t *input_blocks = input;
+ uint8_t *input_last = input + n1 * (uint32_t )128;
+ Hacl_Hash_SHA2_384_init(state);
+ Hacl_Hash_SHA2_384_update_multi(state, input_blocks, n1);
+ Hacl_Hash_SHA2_384_update_last(state, input_last, (uint64_t )r);
+ Hacl_Hash_SHA2_384_finish(state, hash1);
+}
+
+uint32_t SHA2_384_size_hash = (uint32_t )48;
+
+uint32_t SHA2_384_size_block = (uint32_t )128;
+
+uint32_t SHA2_384_size_state = (uint32_t )169;
+
+void SHA2_384_init(uint64_t *state)
+{
+ Hacl_Hash_SHA2_384_init(state);
+}
+
+void SHA2_384_update(uint64_t *state, uint8_t *data_8)
+{
+ Hacl_Hash_SHA2_384_update(state, data_8);
+}
+
+void SHA2_384_update_multi(uint64_t *state, uint8_t *data, uint32_t n1)
+{
+ Hacl_Hash_SHA2_384_update_multi(state, data, n1);
+}
+
+void SHA2_384_update_last(uint64_t *state, uint8_t *data, uint64_t len)
+{
+ Hacl_Hash_SHA2_384_update_last(state, data, len);
+}
+
+void SHA2_384_finish(uint64_t *state, uint8_t *hash1)
+{
+ Hacl_Hash_SHA2_384_finish(state, hash1);
+}
+
+void SHA2_384_hash(uint8_t *hash1, uint8_t *input, uint32_t len)
+{
+ Hacl_Hash_SHA2_384_hash(hash1, input, len);
+}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/SHA2_384.h b/sw/airborne/modules/datalink/hacl-c/SHA2_384.h
new file mode 100644
index 0000000000..e02c44dcaf
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/SHA2_384.h
@@ -0,0 +1,79 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __SHA2_384_H
+#define __SHA2_384_H
+
+
+
+#include "testlib.h"
+
+typedef uint8_t Hacl_Hash_Lib_Create_uint8_t;
+
+typedef uint32_t Hacl_Hash_Lib_Create_uint32_t;
+
+typedef uint64_t Hacl_Hash_Lib_Create_uint64_t;
+
+typedef uint8_t Hacl_Hash_Lib_Create_uint8_ht;
+
+typedef uint32_t Hacl_Hash_Lib_Create_uint32_ht;
+
+typedef uint64_t Hacl_Hash_Lib_Create_uint64_ht;
+
+typedef uint8_t *Hacl_Hash_Lib_Create_uint8_p;
+
+typedef uint32_t *Hacl_Hash_Lib_Create_uint32_p;
+
+typedef uint64_t *Hacl_Hash_Lib_Create_uint64_p;
+
+typedef uint8_t *Hacl_Hash_Lib_LoadStore_uint8_p;
+
+typedef uint8_t Hacl_Hash_SHA2_384_uint8_t;
+
+typedef uint32_t Hacl_Hash_SHA2_384_uint32_t;
+
+typedef uint64_t Hacl_Hash_SHA2_384_uint64_t;
+
+typedef uint8_t Hacl_Hash_SHA2_384_uint8_ht;
+
+typedef uint32_t Hacl_Hash_SHA2_384_uint32_ht;
+
+typedef uint64_t Hacl_Hash_SHA2_384_uint64_ht;
+
+typedef FStar_UInt128_t Hacl_Hash_SHA2_384_uint128_ht;
+
+typedef uint64_t *Hacl_Hash_SHA2_384_uint64_p;
+
+typedef uint8_t *Hacl_Hash_SHA2_384_uint8_p;
+
+typedef uint8_t SHA2_384_uint8_t;
+
+typedef uint32_t SHA2_384_uint32_t;
+
+typedef uint64_t SHA2_384_uint64_t;
+
+typedef uint8_t SHA2_384_uint8_ht;
+
+typedef uint64_t SHA2_384_uint64_ht;
+
+typedef uint64_t *SHA2_384_uint64_p;
+
+typedef uint8_t *SHA2_384_uint8_p;
+
+extern uint32_t SHA2_384_size_hash;
+
+extern uint32_t SHA2_384_size_block;
+
+extern uint32_t SHA2_384_size_state;
+
+void SHA2_384_init(uint64_t *state);
+
+void SHA2_384_update(uint64_t *state, uint8_t *data_8);
+
+void SHA2_384_update_multi(uint64_t *state, uint8_t *data, uint32_t n1);
+
+void SHA2_384_update_last(uint64_t *state, uint8_t *data, uint64_t len);
+
+void SHA2_384_finish(uint64_t *state, uint8_t *hash1);
+
+void SHA2_384_hash(uint8_t *hash1, uint8_t *input, uint32_t len);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/SHA2_512.c b/sw/airborne/modules/datalink/hacl-c/SHA2_512.c
new file mode 100644
index 0000000000..de4b8e1e2e
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/SHA2_512.c
@@ -0,0 +1,368 @@
+#include "SHA2_512.h"
+
+static void
+Hacl_Hash_Lib_LoadStore_uint64s_from_be_bytes(uint64_t *output, uint8_t *input, uint32_t len)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint8_t *x0 = input + (uint32_t )8 * i;
+ uint64_t inputi = load64_be(x0);
+ output[i] = inputi;
+ }
+}
+
+static void
+Hacl_Hash_Lib_LoadStore_uint64s_to_be_bytes(uint8_t *output, uint64_t *input, uint32_t len)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint64_t hd1 = input[i];
+ uint8_t *x0 = output + (uint32_t )8 * i;
+ store64_be(x0, hd1);
+ }
+}
+
+static void Hacl_Hash_SHA2_512_init(uint64_t *state)
+{
+ (void )(state + (uint32_t )168);
+ uint64_t *k1 = state;
+ uint64_t *h_01 = state + (uint32_t )160;
+ uint64_t *p10 = k1;
+ uint64_t *p20 = k1 + (uint32_t )16;
+ uint64_t *p3 = k1 + (uint32_t )32;
+ uint64_t *p4 = k1 + (uint32_t )48;
+ uint64_t *p5 = k1 + (uint32_t )64;
+ uint64_t *p11 = p10;
+ uint64_t *p21 = p10 + (uint32_t )8;
+ uint64_t *p12 = p11;
+ uint64_t *p22 = p11 + (uint32_t )4;
+ p12[0] = (uint64_t )0x428a2f98d728ae22;
+ p12[1] = (uint64_t )0x7137449123ef65cd;
+ p12[2] = (uint64_t )0xb5c0fbcfec4d3b2f;
+ p12[3] = (uint64_t )0xe9b5dba58189dbbc;
+ p22[0] = (uint64_t )0x3956c25bf348b538;
+ p22[1] = (uint64_t )0x59f111f1b605d019;
+ p22[2] = (uint64_t )0x923f82a4af194f9b;
+ p22[3] = (uint64_t )0xab1c5ed5da6d8118;
+ uint64_t *p13 = p21;
+ uint64_t *p23 = p21 + (uint32_t )4;
+ p13[0] = (uint64_t )0xd807aa98a3030242;
+ p13[1] = (uint64_t )0x12835b0145706fbe;
+ p13[2] = (uint64_t )0x243185be4ee4b28c;
+ p13[3] = (uint64_t )0x550c7dc3d5ffb4e2;
+ p23[0] = (uint64_t )0x72be5d74f27b896f;
+ p23[1] = (uint64_t )0x80deb1fe3b1696b1;
+ p23[2] = (uint64_t )0x9bdc06a725c71235;
+ p23[3] = (uint64_t )0xc19bf174cf692694;
+ uint64_t *p14 = p20;
+ uint64_t *p24 = p20 + (uint32_t )8;
+ uint64_t *p15 = p14;
+ uint64_t *p25 = p14 + (uint32_t )4;
+ p15[0] = (uint64_t )0xe49b69c19ef14ad2;
+ p15[1] = (uint64_t )0xefbe4786384f25e3;
+ p15[2] = (uint64_t )0x0fc19dc68b8cd5b5;
+ p15[3] = (uint64_t )0x240ca1cc77ac9c65;
+ p25[0] = (uint64_t )0x2de92c6f592b0275;
+ p25[1] = (uint64_t )0x4a7484aa6ea6e483;
+ p25[2] = (uint64_t )0x5cb0a9dcbd41fbd4;
+ p25[3] = (uint64_t )0x76f988da831153b5;
+ uint64_t *p16 = p24;
+ uint64_t *p26 = p24 + (uint32_t )4;
+ p16[0] = (uint64_t )0x983e5152ee66dfab;
+ p16[1] = (uint64_t )0xa831c66d2db43210;
+ p16[2] = (uint64_t )0xb00327c898fb213f;
+ p16[3] = (uint64_t )0xbf597fc7beef0ee4;
+ p26[0] = (uint64_t )0xc6e00bf33da88fc2;
+ p26[1] = (uint64_t )0xd5a79147930aa725;
+ p26[2] = (uint64_t )0x06ca6351e003826f;
+ p26[3] = (uint64_t )0x142929670a0e6e70;
+ uint64_t *p17 = p3;
+ uint64_t *p27 = p3 + (uint32_t )8;
+ uint64_t *p18 = p17;
+ uint64_t *p28 = p17 + (uint32_t )4;
+ p18[0] = (uint64_t )0x27b70a8546d22ffc;
+ p18[1] = (uint64_t )0x2e1b21385c26c926;
+ p18[2] = (uint64_t )0x4d2c6dfc5ac42aed;
+ p18[3] = (uint64_t )0x53380d139d95b3df;
+ p28[0] = (uint64_t )0x650a73548baf63de;
+ p28[1] = (uint64_t )0x766a0abb3c77b2a8;
+ p28[2] = (uint64_t )0x81c2c92e47edaee6;
+ p28[3] = (uint64_t )0x92722c851482353b;
+ uint64_t *p19 = p27;
+ uint64_t *p29 = p27 + (uint32_t )4;
+ p19[0] = (uint64_t )0xa2bfe8a14cf10364;
+ p19[1] = (uint64_t )0xa81a664bbc423001;
+ p19[2] = (uint64_t )0xc24b8b70d0f89791;
+ p19[3] = (uint64_t )0xc76c51a30654be30;
+ p29[0] = (uint64_t )0xd192e819d6ef5218;
+ p29[1] = (uint64_t )0xd69906245565a910;
+ p29[2] = (uint64_t )0xf40e35855771202a;
+ p29[3] = (uint64_t )0x106aa07032bbd1b8;
+ uint64_t *p110 = p4;
+ uint64_t *p210 = p4 + (uint32_t )8;
+ uint64_t *p111 = p110;
+ uint64_t *p211 = p110 + (uint32_t )4;
+ p111[0] = (uint64_t )0x19a4c116b8d2d0c8;
+ p111[1] = (uint64_t )0x1e376c085141ab53;
+ p111[2] = (uint64_t )0x2748774cdf8eeb99;
+ p111[3] = (uint64_t )0x34b0bcb5e19b48a8;
+ p211[0] = (uint64_t )0x391c0cb3c5c95a63;
+ p211[1] = (uint64_t )0x4ed8aa4ae3418acb;
+ p211[2] = (uint64_t )0x5b9cca4f7763e373;
+ p211[3] = (uint64_t )0x682e6ff3d6b2b8a3;
+ uint64_t *p112 = p210;
+ uint64_t *p212 = p210 + (uint32_t )4;
+ p112[0] = (uint64_t )0x748f82ee5defb2fc;
+ p112[1] = (uint64_t )0x78a5636f43172f60;
+ p112[2] = (uint64_t )0x84c87814a1f0ab72;
+ p112[3] = (uint64_t )0x8cc702081a6439ec;
+ p212[0] = (uint64_t )0x90befffa23631e28;
+ p212[1] = (uint64_t )0xa4506cebde82bde9;
+ p212[2] = (uint64_t )0xbef9a3f7b2c67915;
+ p212[3] = (uint64_t )0xc67178f2e372532b;
+ uint64_t *p113 = p5;
+ uint64_t *p213 = p5 + (uint32_t )8;
+ uint64_t *p1 = p113;
+ uint64_t *p214 = p113 + (uint32_t )4;
+ p1[0] = (uint64_t )0xca273eceea26619c;
+ p1[1] = (uint64_t )0xd186b8c721c0c207;
+ p1[2] = (uint64_t )0xeada7dd6cde0eb1e;
+ p1[3] = (uint64_t )0xf57d4f7fee6ed178;
+ p214[0] = (uint64_t )0x06f067aa72176fba;
+ p214[1] = (uint64_t )0x0a637dc5a2c898a6;
+ p214[2] = (uint64_t )0x113f9804bef90dae;
+ p214[3] = (uint64_t )0x1b710b35131c471b;
+ uint64_t *p114 = p213;
+ uint64_t *p215 = p213 + (uint32_t )4;
+ p114[0] = (uint64_t )0x28db77f523047d84;
+ p114[1] = (uint64_t )0x32caab7b40c72493;
+ p114[2] = (uint64_t )0x3c9ebe0a15c9bebc;
+ p114[3] = (uint64_t )0x431d67c49c100d4c;
+ p215[0] = (uint64_t )0x4cc5d4becb3e42b6;
+ p215[1] = (uint64_t )0x597f299cfc657e2a;
+ p215[2] = (uint64_t )0x5fcb6fab3ad6faec;
+ p215[3] = (uint64_t )0x6c44198c4a475817;
+ uint64_t *p115 = h_01;
+ uint64_t *p2 = h_01 + (uint32_t )4;
+ p115[0] = (uint64_t )0x6a09e667f3bcc908;
+ p115[1] = (uint64_t )0xbb67ae8584caa73b;
+ p115[2] = (uint64_t )0x3c6ef372fe94f82b;
+ p115[3] = (uint64_t )0xa54ff53a5f1d36f1;
+ p2[0] = (uint64_t )0x510e527fade682d1;
+ p2[1] = (uint64_t )0x9b05688c2b3e6c1f;
+ p2[2] = (uint64_t )0x1f83d9abfb41bd6b;
+ p2[3] = (uint64_t )0x5be0cd19137e2179;
+}
+
+static void Hacl_Hash_SHA2_512_update(uint64_t *state, uint8_t *data)
+{
+ KRML_CHECK_SIZE((uint64_t )(uint32_t )0, (uint32_t )16);
+ uint64_t data_w[16];
+ for (uintmax_t _i = 0; _i < (uint32_t )16; ++_i)
+ data_w[_i] = (uint64_t )(uint32_t )0;
+ Hacl_Hash_Lib_LoadStore_uint64s_from_be_bytes(data_w, data, (uint32_t )16);
+ uint64_t *hash_w = state + (uint32_t )160;
+ uint64_t *ws_w = state + (uint32_t )80;
+ uint64_t *k_w = state;
+ uint64_t *counter_w = state + (uint32_t )168;
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )16; i = i + (uint32_t )1)
+ {
+ uint64_t uu____242 = data_w[i];
+ ws_w[i] = uu____242;
+ }
+ for (uint32_t i = (uint32_t )16; i < (uint32_t )80; i = i + (uint32_t )1)
+ {
+ uint64_t t16 = ws_w[i - (uint32_t )16];
+ uint64_t t15 = ws_w[i - (uint32_t )15];
+ uint64_t t7 = ws_w[i - (uint32_t )7];
+ uint64_t t2 = ws_w[i - (uint32_t )2];
+ ws_w[i] =
+ ((t2 >> (uint32_t )19 | t2 << (uint32_t )64 - (uint32_t )19)
+ ^ (t2 >> (uint32_t )61 | t2 << (uint32_t )64 - (uint32_t )61) ^ t2 >> (uint32_t )6)
+ +
+ t7
+ +
+ ((t15 >> (uint32_t )1 | t15 << (uint32_t )64 - (uint32_t )1)
+ ^ (t15 >> (uint32_t )8 | t15 << (uint32_t )64 - (uint32_t )8) ^ t15 >> (uint32_t )7)
+ + t16;
+ }
+ uint64_t hash_0[8] = { 0 };
+ memcpy(hash_0, hash_w, (uint32_t )8 * sizeof hash_w[0]);
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )80; i = i + (uint32_t )1)
+ {
+ uint64_t a = hash_0[0];
+ uint64_t b = hash_0[1];
+ uint64_t c = hash_0[2];
+ uint64_t d = hash_0[3];
+ uint64_t e = hash_0[4];
+ uint64_t f1 = hash_0[5];
+ uint64_t g = hash_0[6];
+ uint64_t h = hash_0[7];
+ uint64_t k_t = k_w[i];
+ uint64_t ws_t = ws_w[i];
+ uint64_t
+ t1 =
+ h
+ +
+ ((e >> (uint32_t )14 | e << (uint32_t )64 - (uint32_t )14)
+ ^
+ (e >> (uint32_t )18 | e << (uint32_t )64 - (uint32_t )18)
+ ^ (e >> (uint32_t )41 | e << (uint32_t )64 - (uint32_t )41))
+ + (e & f1 ^ ~e & g)
+ + k_t
+ + ws_t;
+ uint64_t
+ t2 =
+ ((a >> (uint32_t )28 | a << (uint32_t )64 - (uint32_t )28)
+ ^
+ (a >> (uint32_t )34 | a << (uint32_t )64 - (uint32_t )34)
+ ^ (a >> (uint32_t )39 | a << (uint32_t )64 - (uint32_t )39))
+ + (a & b ^ a & c ^ b & c);
+ uint64_t x1 = t1 + t2;
+ uint64_t x5 = d + t1;
+ uint64_t *p1 = hash_0;
+ uint64_t *p2 = hash_0 + (uint32_t )4;
+ p1[0] = x1;
+ p1[1] = a;
+ p1[2] = b;
+ p1[3] = c;
+ p2[0] = x5;
+ p2[1] = e;
+ p2[2] = f1;
+ p2[3] = g;
+ }
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )8; i = i + (uint32_t )1)
+ {
+ uint64_t uu____871 = hash_w[i];
+ uint64_t uu____874 = hash_0[i];
+ uint64_t uu____870 = uu____871 + uu____874;
+ hash_w[i] = uu____870;
+ }
+ uint64_t c0 = counter_w[0];
+ uint64_t one1 = (uint64_t )(uint32_t )1;
+ counter_w[0] = c0 + one1;
+}
+
+static void Hacl_Hash_SHA2_512_update_multi(uint64_t *state, uint8_t *data, uint32_t n1)
+{
+ for (uint32_t i = (uint32_t )0; i < n1; i = i + (uint32_t )1)
+ {
+ uint8_t *b = data + i * (uint32_t )128;
+ Hacl_Hash_SHA2_512_update(state, b);
+ }
+}
+
+static void Hacl_Hash_SHA2_512_update_last(uint64_t *state, uint8_t *data, uint64_t len)
+{
+ uint8_t blocks[256] = { 0 };
+ uint32_t nb;
+ if (len < (uint64_t )112)
+ nb = (uint32_t )1;
+ else
+ nb = (uint32_t )2;
+ uint8_t *final_blocks;
+ if (len < (uint64_t )112)
+ final_blocks = blocks + (uint32_t )128;
+ else
+ final_blocks = blocks;
+ memcpy(final_blocks, data, (uint32_t )len * sizeof data[0]);
+ uint64_t n1 = state[168];
+ uint8_t *padding = final_blocks + (uint32_t )len;
+ FStar_UInt128_t
+ encodedlen =
+ FStar_UInt128_shift_left(FStar_UInt128_add(FStar_UInt128_mul_wide(n1,
+ (uint64_t )(uint32_t )128),
+ FStar_Int_Cast_Full_uint64_to_uint128(len)),
+ (uint32_t )3);
+ uint32_t
+ pad0len = ((uint32_t )256 - ((uint32_t )len + (uint32_t )16 + (uint32_t )1)) % (uint32_t )128;
+ uint8_t *buf1 = padding;
+ (void )(padding + (uint32_t )1);
+ uint8_t *buf2 = padding + (uint32_t )1 + pad0len;
+ buf1[0] = (uint8_t )0x80;
+ store128_be(buf2, encodedlen);
+ Hacl_Hash_SHA2_512_update_multi(state, final_blocks, nb);
+}
+
+static void Hacl_Hash_SHA2_512_finish(uint64_t *state, uint8_t *hash1)
+{
+ uint64_t *hash_w = state + (uint32_t )160;
+ Hacl_Hash_Lib_LoadStore_uint64s_to_be_bytes(hash1, hash_w, (uint32_t )8);
+}
+
+static void Hacl_Hash_SHA2_512_hash(uint8_t *hash1, uint8_t *input, uint32_t len)
+{
+ KRML_CHECK_SIZE((uint64_t )(uint32_t )0, (uint32_t )169);
+ uint64_t state[169];
+ for (uintmax_t _i = 0; _i < (uint32_t )169; ++_i)
+ state[_i] = (uint64_t )(uint32_t )0;
+ uint32_t n1 = len / (uint32_t )128;
+ uint32_t r = len % (uint32_t )128;
+ uint8_t *input_blocks = input;
+ uint8_t *input_last = input + n1 * (uint32_t )128;
+ Hacl_Hash_SHA2_512_init(state);
+ Hacl_Hash_SHA2_512_update_multi(state, input_blocks, n1);
+ Hacl_Hash_SHA2_512_update_last(state, input_last, (uint64_t )r);
+ Hacl_Hash_SHA2_512_finish(state, hash1);
+}
+
+uint32_t SHA2_512_size_word = (uint32_t )8;
+
+uint32_t SHA2_512_size_hash_w = (uint32_t )8;
+
+uint32_t SHA2_512_size_block_w = (uint32_t )16;
+
+uint32_t SHA2_512_size_hash = (uint32_t )64;
+
+uint32_t SHA2_512_size_block = (uint32_t )128;
+
+uint32_t SHA2_512_size_k_w = (uint32_t )80;
+
+uint32_t SHA2_512_size_ws_w = (uint32_t )80;
+
+uint32_t SHA2_512_size_whash_w = (uint32_t )8;
+
+uint32_t SHA2_512_size_count_w = (uint32_t )1;
+
+uint32_t SHA2_512_size_len_8 = (uint32_t )16;
+
+uint32_t SHA2_512_size_state = (uint32_t )169;
+
+uint32_t SHA2_512_pos_k_w = (uint32_t )0;
+
+uint32_t SHA2_512_pos_ws_w = (uint32_t )80;
+
+uint32_t SHA2_512_pos_whash_w = (uint32_t )160;
+
+uint32_t SHA2_512_pos_count_w = (uint32_t )168;
+
+void SHA2_512_init(uint64_t *state)
+{
+ Hacl_Hash_SHA2_512_init(state);
+}
+
+void SHA2_512_update(uint64_t *state, uint8_t *data)
+{
+ Hacl_Hash_SHA2_512_update(state, data);
+}
+
+void SHA2_512_update_multi(uint64_t *state, uint8_t *data, uint32_t n1)
+{
+ Hacl_Hash_SHA2_512_update_multi(state, data, n1);
+}
+
+void SHA2_512_update_last(uint64_t *state, uint8_t *data, uint64_t len)
+{
+ Hacl_Hash_SHA2_512_update_last(state, data, len);
+}
+
+void SHA2_512_finish(uint64_t *state, uint8_t *hash1)
+{
+ Hacl_Hash_SHA2_512_finish(state, hash1);
+}
+
+void SHA2_512_hash(uint8_t *hash1, uint8_t *input, uint32_t len)
+{
+ Hacl_Hash_SHA2_512_hash(hash1, input, len);
+}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/SHA2_512.h b/sw/airborne/modules/datalink/hacl-c/SHA2_512.h
new file mode 100644
index 0000000000..ecc1cd9326
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/SHA2_512.h
@@ -0,0 +1,107 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __SHA2_512_H
+#define __SHA2_512_H
+
+
+
+#include "testlib.h"
+
+typedef uint8_t Hacl_Hash_Lib_Create_uint8_t;
+
+typedef uint32_t Hacl_Hash_Lib_Create_uint32_t;
+
+typedef uint64_t Hacl_Hash_Lib_Create_uint64_t;
+
+typedef uint8_t Hacl_Hash_Lib_Create_uint8_ht;
+
+typedef uint32_t Hacl_Hash_Lib_Create_uint32_ht;
+
+typedef uint64_t Hacl_Hash_Lib_Create_uint64_ht;
+
+typedef uint8_t *Hacl_Hash_Lib_Create_uint8_p;
+
+typedef uint32_t *Hacl_Hash_Lib_Create_uint32_p;
+
+typedef uint64_t *Hacl_Hash_Lib_Create_uint64_p;
+
+typedef uint8_t *Hacl_Hash_Lib_LoadStore_uint8_p;
+
+typedef uint8_t Hacl_Hash_SHA2_512_uint8_t;
+
+typedef uint32_t Hacl_Hash_SHA2_512_uint32_t;
+
+typedef uint64_t Hacl_Hash_SHA2_512_uint64_t;
+
+typedef uint8_t Hacl_Hash_SHA2_512_uint8_ht;
+
+typedef uint32_t Hacl_Hash_SHA2_512_uint32_ht;
+
+typedef uint64_t Hacl_Hash_SHA2_512_uint64_ht;
+
+typedef FStar_UInt128_t Hacl_Hash_SHA2_512_uint128_ht;
+
+typedef uint64_t *Hacl_Hash_SHA2_512_uint64_p;
+
+typedef uint8_t *Hacl_Hash_SHA2_512_uint8_p;
+
+typedef uint8_t SHA2_512_uint8_t;
+
+typedef uint32_t SHA2_512_uint32_t;
+
+typedef uint64_t SHA2_512_uint64_t;
+
+typedef uint8_t SHA2_512_uint8_ht;
+
+typedef uint32_t SHA2_512_uint32_ht;
+
+typedef uint64_t SHA2_512_uint64_ht;
+
+typedef FStar_UInt128_t SHA2_512_uint128_ht;
+
+typedef uint64_t *SHA2_512_uint64_p;
+
+typedef uint8_t *SHA2_512_uint8_p;
+
+extern uint32_t SHA2_512_size_word;
+
+extern uint32_t SHA2_512_size_hash_w;
+
+extern uint32_t SHA2_512_size_block_w;
+
+extern uint32_t SHA2_512_size_hash;
+
+extern uint32_t SHA2_512_size_block;
+
+extern uint32_t SHA2_512_size_k_w;
+
+extern uint32_t SHA2_512_size_ws_w;
+
+extern uint32_t SHA2_512_size_whash_w;
+
+extern uint32_t SHA2_512_size_count_w;
+
+extern uint32_t SHA2_512_size_len_8;
+
+extern uint32_t SHA2_512_size_state;
+
+extern uint32_t SHA2_512_pos_k_w;
+
+extern uint32_t SHA2_512_pos_ws_w;
+
+extern uint32_t SHA2_512_pos_whash_w;
+
+extern uint32_t SHA2_512_pos_count_w;
+
+void SHA2_512_init(uint64_t *state);
+
+void SHA2_512_update(uint64_t *state, uint8_t *data);
+
+void SHA2_512_update_multi(uint64_t *state, uint8_t *data, uint32_t n1);
+
+void SHA2_512_update_last(uint64_t *state, uint8_t *data, uint64_t len);
+
+void SHA2_512_finish(uint64_t *state, uint8_t *hash1);
+
+void SHA2_512_hash(uint8_t *hash1, uint8_t *input, uint32_t len);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/Salsa20.c b/sw/airborne/modules/datalink/hacl-c/Salsa20.c
new file mode 100644
index 0000000000..1a2c4d96f8
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Salsa20.c
@@ -0,0 +1,369 @@
+#include "Salsa20.h"
+
+static void
+Hacl_Lib_Create_make_h32_4(uint32_t *b, uint32_t s0, uint32_t s1, uint32_t s2, uint32_t s3)
+{
+ b[0] = s0;
+ b[1] = s1;
+ b[2] = s2;
+ b[3] = s3;
+}
+
+static void
+Hacl_Lib_Create_make_h32_8(
+ uint32_t *b,
+ uint32_t s0,
+ uint32_t s1,
+ uint32_t s2,
+ uint32_t s3,
+ uint32_t s4,
+ uint32_t s5,
+ uint32_t s6,
+ uint32_t s7
+)
+{
+ Hacl_Lib_Create_make_h32_4(b, s0, s1, s2, s3);
+ Hacl_Lib_Create_make_h32_4(b + (uint32_t )4, s4, s5, s6, s7);
+}
+
+static void
+Hacl_Lib_Create_make_h32_16(
+ uint32_t *b,
+ uint32_t s0,
+ uint32_t s1,
+ uint32_t s2,
+ uint32_t s3,
+ uint32_t s4,
+ uint32_t s5,
+ uint32_t s6,
+ uint32_t s7,
+ uint32_t s8,
+ uint32_t s9,
+ uint32_t s10,
+ uint32_t s11,
+ uint32_t s12,
+ uint32_t s13,
+ uint32_t s14,
+ uint32_t s15
+)
+{
+ Hacl_Lib_Create_make_h32_8(b, s0, s1, s2, s3, s4, s5, s6, s7);
+ Hacl_Lib_Create_make_h32_8(b + (uint32_t )8, s8, s9, s10, s11, s12, s13, s14, s15);
+}
+
+static void
+Hacl_Lib_LoadStore32_uint32s_from_le_bytes(uint32_t *output, uint8_t *input, uint32_t len)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint8_t *x0 = input + (uint32_t )4 * i;
+ uint32_t inputi = load32_le(x0);
+ output[i] = inputi;
+ }
+}
+
+static void
+Hacl_Lib_LoadStore32_uint32s_to_le_bytes(uint8_t *output, uint32_t *input, uint32_t len)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint32_t hd1 = input[i];
+ uint8_t *x0 = output + (uint32_t )4 * i;
+ store32_le(x0, hd1);
+ }
+}
+
+inline static void Hacl_Impl_Salsa20_setup(uint32_t *st, uint8_t *k, uint8_t *n1, uint64_t c)
+{
+ uint32_t tmp[10] = { 0 };
+ uint32_t *k_ = tmp;
+ uint32_t *n_ = tmp + (uint32_t )8;
+ Hacl_Lib_LoadStore32_uint32s_from_le_bytes(k_, k, (uint32_t )8);
+ Hacl_Lib_LoadStore32_uint32s_from_le_bytes(n_, n1, (uint32_t )2);
+ uint32_t c0 = (uint32_t )c;
+ uint32_t c1 = (uint32_t )(c >> (uint32_t )32);
+ uint32_t k0 = k_[0];
+ uint32_t k1 = k_[1];
+ uint32_t k2 = k_[2];
+ uint32_t k3 = k_[3];
+ uint32_t k4 = k_[4];
+ uint32_t k5 = k_[5];
+ uint32_t k6 = k_[6];
+ uint32_t k7 = k_[7];
+ uint32_t n0 = n_[0];
+ uint32_t n11 = n_[1];
+ Hacl_Lib_Create_make_h32_16(st,
+ (uint32_t )0x61707865,
+ k0,
+ k1,
+ k2,
+ k3,
+ (uint32_t )0x3320646e,
+ n0,
+ n11,
+ c0,
+ c1,
+ (uint32_t )0x79622d32,
+ k4,
+ k5,
+ k6,
+ k7,
+ (uint32_t )0x6b206574);
+}
+
+inline static void
+Hacl_Impl_Salsa20_line(uint32_t *st, uint32_t a, uint32_t b, uint32_t d, uint32_t s)
+{
+ uint32_t sa = st[a];
+ uint32_t sb = st[b];
+ uint32_t sd = st[d];
+ uint32_t sbd = sb + sd;
+ uint32_t sbds = sbd << s | sbd >> (uint32_t )32 - s;
+ st[a] = sa ^ sbds;
+}
+
+inline static void
+Hacl_Impl_Salsa20_quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
+{
+ Hacl_Impl_Salsa20_line(st, b, a, d, (uint32_t )7);
+ Hacl_Impl_Salsa20_line(st, c, b, a, (uint32_t )9);
+ Hacl_Impl_Salsa20_line(st, d, c, b, (uint32_t )13);
+ Hacl_Impl_Salsa20_line(st, a, d, c, (uint32_t )18);
+}
+
+inline static void Hacl_Impl_Salsa20_double_round(uint32_t *st)
+{
+ Hacl_Impl_Salsa20_quarter_round(st, (uint32_t )0, (uint32_t )4, (uint32_t )8, (uint32_t )12);
+ Hacl_Impl_Salsa20_quarter_round(st, (uint32_t )5, (uint32_t )9, (uint32_t )13, (uint32_t )1);
+ Hacl_Impl_Salsa20_quarter_round(st, (uint32_t )10, (uint32_t )14, (uint32_t )2, (uint32_t )6);
+ Hacl_Impl_Salsa20_quarter_round(st, (uint32_t )15, (uint32_t )3, (uint32_t )7, (uint32_t )11);
+ Hacl_Impl_Salsa20_quarter_round(st, (uint32_t )0, (uint32_t )1, (uint32_t )2, (uint32_t )3);
+ Hacl_Impl_Salsa20_quarter_round(st, (uint32_t )5, (uint32_t )6, (uint32_t )7, (uint32_t )4);
+ Hacl_Impl_Salsa20_quarter_round(st, (uint32_t )10, (uint32_t )11, (uint32_t )8, (uint32_t )9);
+ Hacl_Impl_Salsa20_quarter_round(st,
+ (uint32_t )15,
+ (uint32_t )12,
+ (uint32_t )13,
+ (uint32_t )14);
+}
+
+inline static void Hacl_Impl_Salsa20_rounds(uint32_t *st)
+{
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )10; i = i + (uint32_t )1)
+ Hacl_Impl_Salsa20_double_round(st);
+}
+
+inline static void Hacl_Impl_Salsa20_sum_states(uint32_t *st, uint32_t *st_)
+{
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )16; i = i + (uint32_t )1)
+ {
+ uint32_t uu____871 = st[i];
+ uint32_t uu____874 = st_[i];
+ uint32_t uu____870 = uu____871 + uu____874;
+ st[i] = uu____870;
+ }
+}
+
+inline static void Hacl_Impl_Salsa20_copy_state(uint32_t *st, uint32_t *st_)
+{
+ memcpy(st, st_, (uint32_t )16 * sizeof st_[0]);
+}
+
+inline static void Hacl_Impl_Salsa20_salsa20_core(uint32_t *k, uint32_t *st, uint64_t ctr)
+{
+ uint32_t c0 = (uint32_t )ctr;
+ uint32_t c1 = (uint32_t )(ctr >> (uint32_t )32);
+ st[8] = c0;
+ st[9] = c1;
+ Hacl_Impl_Salsa20_copy_state(k, st);
+ Hacl_Impl_Salsa20_rounds(k);
+ Hacl_Impl_Salsa20_sum_states(k, st);
+}
+
+inline static void
+Hacl_Impl_Salsa20_salsa20_block(uint8_t *stream_block, uint32_t *st, uint64_t ctr)
+{
+ uint32_t st_[16] = { 0 };
+ Hacl_Impl_Salsa20_salsa20_core(st_, st, ctr);
+ Hacl_Lib_LoadStore32_uint32s_to_le_bytes(stream_block, st_, (uint32_t )16);
+}
+
+inline static void Hacl_Impl_Salsa20_init(uint32_t *st, uint8_t *k, uint8_t *n1)
+{
+ Hacl_Impl_Salsa20_setup(st, k, n1, (uint64_t )0);
+}
+
+static void
+Hacl_Impl_Salsa20_update_last(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint32_t *st,
+ uint64_t ctr
+)
+{
+ uint8_t block[64] = { 0 };
+ Hacl_Impl_Salsa20_salsa20_block(block, st, ctr);
+ uint8_t *mask = block;
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint8_t uu____602 = plain[i];
+ uint8_t uu____605 = mask[i];
+ uint8_t uu____601 = uu____602 ^ uu____605;
+ output[i] = uu____601;
+ }
+}
+
+static void
+Hacl_Impl_Salsa20_update(uint8_t *output, uint8_t *plain, uint32_t *st, uint64_t ctr)
+{
+ uint32_t b[48] = { 0 };
+ uint32_t *k = b;
+ uint32_t *ib = b + (uint32_t )16;
+ uint32_t *ob = b + (uint32_t )32;
+ Hacl_Impl_Salsa20_salsa20_core(k, st, ctr);
+ Hacl_Lib_LoadStore32_uint32s_from_le_bytes(ib, plain, (uint32_t )16);
+ for (uint32_t i = (uint32_t )0; i < (uint32_t )16; i = i + (uint32_t )1)
+ {
+ uint32_t uu____602 = ib[i];
+ uint32_t uu____605 = k[i];
+ uint32_t uu____601 = uu____602 ^ uu____605;
+ ob[i] = uu____601;
+ }
+ Hacl_Lib_LoadStore32_uint32s_to_le_bytes(output, ob, (uint32_t )16);
+}
+
+static void
+Hacl_Impl_Salsa20_salsa20_counter_mode_blocks(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint32_t *st,
+ uint64_t ctr
+)
+{
+ for (uint32_t i = (uint32_t )0; i < len; i = i + (uint32_t )1)
+ {
+ uint8_t *b = plain + (uint32_t )64 * i;
+ uint8_t *o = output + (uint32_t )64 * i;
+ Hacl_Impl_Salsa20_update(o, b, st, ctr + (uint64_t )i);
+ }
+}
+
+static void
+Hacl_Impl_Salsa20_salsa20_counter_mode(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint32_t *st,
+ uint64_t ctr
+)
+{
+ uint32_t blocks_len = len >> (uint32_t )6;
+ uint32_t part_len = len & (uint32_t )0x3f;
+ uint8_t *output_ = output;
+ uint8_t *plain_ = plain;
+ uint8_t *output__ = output + (uint32_t )64 * blocks_len;
+ uint8_t *plain__ = plain + (uint32_t )64 * blocks_len;
+ Hacl_Impl_Salsa20_salsa20_counter_mode_blocks(output_, plain_, blocks_len, st, ctr);
+ if (part_len > (uint32_t )0)
+ Hacl_Impl_Salsa20_update_last(output__, plain__, part_len, st, ctr + (uint64_t )blocks_len);
+}
+
+static void
+Hacl_Impl_Salsa20_salsa20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint64_t ctr
+)
+{
+ uint32_t buf[16] = { 0 };
+ uint32_t *st = buf;
+ Hacl_Impl_Salsa20_init(st, k, n1);
+ Hacl_Impl_Salsa20_salsa20_counter_mode(output, plain, len, st, ctr);
+}
+
+inline static void Hacl_Impl_HSalsa20_setup(uint32_t *st, uint8_t *k, uint8_t *n1)
+{
+ uint32_t tmp[12] = { 0 };
+ uint32_t *k_ = tmp;
+ uint32_t *n_ = tmp + (uint32_t )8;
+ Hacl_Lib_LoadStore32_uint32s_from_le_bytes(k_, k, (uint32_t )8);
+ Hacl_Lib_LoadStore32_uint32s_from_le_bytes(n_, n1, (uint32_t )4);
+ uint32_t k0 = k_[0];
+ uint32_t k1 = k_[1];
+ uint32_t k2 = k_[2];
+ uint32_t k3 = k_[3];
+ uint32_t k4 = k_[4];
+ uint32_t k5 = k_[5];
+ uint32_t k6 = k_[6];
+ uint32_t k7 = k_[7];
+ uint32_t n0 = n_[0];
+ uint32_t n11 = n_[1];
+ uint32_t n2 = n_[2];
+ uint32_t n3 = n_[3];
+ Hacl_Lib_Create_make_h32_16(st,
+ (uint32_t )0x61707865,
+ k0,
+ k1,
+ k2,
+ k3,
+ (uint32_t )0x3320646e,
+ n0,
+ n11,
+ n2,
+ n3,
+ (uint32_t )0x79622d32,
+ k4,
+ k5,
+ k6,
+ k7,
+ (uint32_t )0x6b206574);
+}
+
+static void
+Hacl_Impl_HSalsa20_crypto_core_hsalsa20(uint8_t *output, uint8_t *nonce, uint8_t *key)
+{
+ uint32_t tmp[24] = { 0 };
+ uint32_t *st = tmp;
+ uint32_t *hs = tmp + (uint32_t )16;
+ Hacl_Impl_HSalsa20_setup(st, key, nonce);
+ Hacl_Impl_Salsa20_rounds(st);
+ uint32_t hs0 = st[0];
+ uint32_t hs1 = st[5];
+ uint32_t hs2 = st[10];
+ uint32_t hs3 = st[15];
+ uint32_t hs4 = st[6];
+ uint32_t hs5 = st[7];
+ uint32_t hs6 = st[8];
+ uint32_t hs7 = st[9];
+ Hacl_Lib_Create_make_h32_8(hs, hs0, hs1, hs2, hs3, hs4, hs5, hs6, hs7);
+ Hacl_Lib_LoadStore32_uint32s_to_le_bytes(output, hs, (uint32_t )8);
+}
+
+void *Salsa20_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *b)
+{
+ return (void *)(uint8_t )0;
+}
+
+void
+Salsa20_salsa20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint64_t ctr
+)
+{
+ Hacl_Impl_Salsa20_salsa20(output, plain, len, k, n1, ctr);
+}
+
+void Salsa20_hsalsa20(uint8_t *output, uint8_t *key, uint8_t *nonce)
+{
+ Hacl_Impl_HSalsa20_crypto_core_hsalsa20(output, nonce, key);
+}
+
diff --git a/sw/airborne/modules/datalink/hacl-c/Salsa20.h b/sw/airborne/modules/datalink/hacl-c/Salsa20.h
new file mode 100644
index 0000000000..51a2bba3ac
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/Salsa20.h
@@ -0,0 +1,64 @@
+/* This file was auto-generated by KreMLin! */
+#include "kremlib.h"
+#ifndef __Salsa20_H
+#define __Salsa20_H
+
+
+
+#include "testlib.h"
+
+typedef uint32_t Hacl_Impl_Xor_Lemmas_u32;
+
+typedef uint8_t Hacl_Impl_Xor_Lemmas_u8;
+
+typedef uint32_t Hacl_Lib_Create_h32;
+
+typedef uint8_t *Hacl_Lib_LoadStore32_uint8_p;
+
+typedef uint32_t Hacl_Impl_Salsa20_u32;
+
+typedef uint32_t Hacl_Impl_Salsa20_h32;
+
+typedef uint8_t *Hacl_Impl_Salsa20_uint8_p;
+
+typedef uint32_t *Hacl_Impl_Salsa20_state;
+
+typedef uint32_t Hacl_Impl_Salsa20_idx;
+
+typedef struct
+{
+ void *k;
+ void *n;
+}
+Hacl_Impl_Salsa20_log_t_;
+
+typedef void *Hacl_Impl_Salsa20_log_t;
+
+typedef uint32_t Hacl_Impl_HSalsa20_h32;
+
+typedef uint32_t Hacl_Impl_HSalsa20_u32;
+
+typedef uint8_t *Hacl_Impl_HSalsa20_uint8_p;
+
+typedef uint32_t *Hacl_Impl_HSalsa20_state;
+
+typedef uint8_t *Salsa20_uint8_p;
+
+typedef uint32_t Salsa20_uint32_t;
+
+void *Salsa20_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *b);
+
+typedef uint32_t *Salsa20_state;
+
+void
+Salsa20_salsa20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint64_t ctr
+);
+
+void Salsa20_hsalsa20(uint8_t *output, uint8_t *key, uint8_t *nonce);
+#endif
diff --git a/sw/airborne/modules/datalink/hacl-c/gcc_compat.h b/sw/airborne/modules/datalink/hacl-c/gcc_compat.h
new file mode 100644
index 0000000000..25cd5ee159
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/gcc_compat.h
@@ -0,0 +1,18 @@
+#ifndef __GCC_COMPAT_H
+#define __GCC_COMPAT_H
+
+#ifndef _MSC_VER
+ // Use the gcc predefined macros if on a platform/architectures that set them. Otherwise define them to be empty.
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+
+
+#endif // __GCC_COMPAT_H
diff --git a/sw/airborne/modules/datalink/hacl-c/kremlib.c b/sw/airborne/modules/datalink/hacl-c/kremlib.c
new file mode 100644
index 0000000000..8600b49896
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/kremlib.c
@@ -0,0 +1,52 @@
+#include "kremlib.h"
+#include <stdlib.h>
+
+#ifdef KREMLIB_EMBEDDED_TARGET /* bare-metal targets */
+int exit_success = 0;
+int exit_failure = 1;
+
+void print_string(const char *s __attribute__((unused))) {}
+void print_bytes(uint8_t *b, uint32_t len __attribute__((unused))) {}
+#else /* any other platform */
+
+int exit_success = EXIT_SUCCESS;
+int exit_failure = EXIT_FAILURE;
+
+
+void print_string(const char *s) {
+ printf("%s", s);
+}
+
+void print_bytes(uint8_t *b, uint32_t len) {
+ for (uint32_t i = 0; i < len; i++){
+ printf("%02x", b[i]);
+ }
+ printf("\n");
+}
+#endif
+
+void FStar_Buffer_recall(void *x) {}
+
+bool Prims_op_GreaterThanOrEqual(Prims_int x, Prims_int y) { KRML_EXIT; }
+bool Prims_op_LessThanOrEqual(Prims_int x, Prims_int y) { KRML_EXIT; }
+bool Prims_op_GreaterThan(Prims_int x, Prims_int y) { KRML_EXIT; }
+bool Prims_op_LessThan(Prims_int x, Prims_int y) { KRML_EXIT; }
+Prims_int Prims_pow2(Prims_int x) { KRML_EXIT; }
+Prims_int Prims_op_Multiply(Prims_int x, Prims_int y) { KRML_EXIT; }
+Prims_int Prims_op_Addition(Prims_int x, Prims_int y) { KRML_EXIT; }
+Prims_int Prims_op_Subtraction(Prims_int x, Prims_int y) { KRML_EXIT; }
+Prims_int Prims_op_Division(Prims_int x, Prims_int y) { KRML_EXIT; }
+Prims_int Prims_op_Modulus(Prims_int x, Prims_int y) { KRML_EXIT; }
+void *Prims_magic(void *_) { KRML_EXIT; }
+void *Prims_admit(void *_) { KRML_EXIT; }
+void *Prims____Cons___tl(void *_) { KRML_EXIT; }
+
+bool FStar_HyperStack_is_eternal_color(Prims_int x0) { KRML_EXIT; }
+
+FStar_Seq_Base_seq FStar_Seq_Base_append(FStar_Seq_Base_seq x, FStar_Seq_Base_seq y) { KRML_EXIT; }
+FStar_Seq_Base_seq FStar_Seq_Base_slice(FStar_Seq_Base_seq x, FStar_Seq_Base_seq y, Prims_nat z) {
+ KRML_EXIT;
+}
+
+Prims_int FStar_UInt32_v(uint32_t x) { return (void *)0; }
+FStar_UInt32_t FStar_UInt32_uint_to_t(Prims_nat x) { KRML_EXIT; }
diff --git a/sw/airborne/modules/datalink/hacl-c/kremlib.h b/sw/airborne/modules/datalink/hacl-c/kremlib.h
new file mode 100644
index 0000000000..9815a9145c
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/kremlib.h
@@ -0,0 +1,578 @@
+#ifndef __KREMLIB_H
+#define __KREMLIB_H
+
#include <inttypes.h>
#include <stdlib.h>

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
+
+// Define __cdecl and friends when using GCC, so that we can safely compile code
+// that contains __cdecl on all platforms.
+#include "gcc_compat.h"
+
// GCC-specific attribute syntax; everyone else gets the standard C inline
// attribute.
// Fix: the original tested __GNU_C__, a macro no compiler defines, so
// always_inline was silently never applied. The correct predefined macro is
// __GNUC__ (clang defines it too, hence the inner __clang__ check).
#ifdef __GNUC__
#ifndef __clang__
#define force_inline inline __attribute__((always_inline))
#else
#define force_inline inline
#endif
#else
#define force_inline inline
#endif
+
// Uppercase issue; we have to define lowercase version of the C macros (as we
// have no way to refer to an uppercase *variable* in F*).
// Defined in kremlib.c: EXIT_SUCCESS/EXIT_FAILURE on hosted builds, 0/1 on
// bare-metal targets.
extern int exit_success;
extern int exit_failure;

// Debug printing helpers, defined in kremlib.c (no-ops on bare-metal builds).
void print_string(const char *s);
void print_bytes(uint8_t *b, uint32_t len);

// If some globals need to be initialized before the main, then kremlin will
// generate and try to link last a function with this type:
void kremlinit_globals(void);

// Buffers (FIXME remove eqb!)
// NOTE(review): memcmp is not constant-time; do not use eqb to compare
// secret material.
#define FStar_Buffer_eqb(b1, b2, n) \
  (memcmp((b1), (b2), (n) * sizeof((b1)[0])) == 0)
#define FStar_Buffer_to_seq_full(x) 0
void FStar_Buffer_recall(void *x);

// Some types that KreMLin has no special knowledge of; many of them appear in
// signatures of ghost functions, meaning that it suffices to give them (any)
// definition.
typedef void *Prims_pos, *Prims_nat, *Prims_nonzero, *FStar_Seq_Base_seq,
    *Prims_int, *Prims_prop, *FStar_HyperStack_mem, *FStar_Set_set,
    *Prims_st_pre_h, *FStar_Heap_heap, *Prims_all_pre_h, *FStar_TSet_set,
    *Prims_string, *Prims_list, *FStar_Map_t, *FStar_UInt63_t_, *FStar_Int63_t_,
    *FStar_UInt63_t, *FStar_Int63_t, *FStar_UInt_uint_t, *FStar_Int_int_t,
    *FStar_HyperStack_stackref, *FStar_Bytes_bytes, *FStar_HyperHeap_rid,
    *FStar_Heap_aref, *FStar_Monotonic_Heap_heap,
    *FStar_Monotonic_HyperHeap_rid, *FStar_Monotonic_HyperStack_mem;

// Prims; all of the functions below abort;
// see the corresponding stubs in kremlib.c — none of them is expected to be
// reached at run time.
bool Prims_op_GreaterThanOrEqual(Prims_int x, Prims_int y);
bool Prims_op_LessThanOrEqual(Prims_int x, Prims_int y);
bool Prims_op_GreaterThan(Prims_int x, Prims_int y);
bool Prims_op_LessThan(Prims_int x, Prims_int y);
Prims_int Prims_pow2(Prims_int x);
Prims_int Prims_op_Multiply(Prims_int x, Prims_int y);
Prims_int Prims_op_Addition(Prims_int x, Prims_int y);
Prims_int Prims_op_Subtraction(Prims_int x, Prims_int y);
Prims_int Prims_op_Division(Prims_int x, Prims_int y);
Prims_int Prims_op_Modulus(Prims_int x, Prims_int y);
void *Prims_magic(void *_);
void *Prims_admit(void *x);
void *Prims____Cons___tl(void *_);
+
// In statement position, exiting is easy.
#define KRML_EXIT \
  do { \
    printf("Unimplemented function at %s:%d\n", __FILE__, __LINE__); \
    exit(254); \
  } while (0)

// In expression position, use the comma-operator and a malloc to return an
// expression of the right size. KreMLin passes t as the parameter to the macro.
#define KRML_EABORT(t, msg) \
  (printf("KreMLin abort at %s:%d\n%s\n", __FILE__, __LINE__, msg), exit(255), \
   *((t *)malloc(sizeof(t))))

// Wrapped in do/while(0) so the macro is a single statement: the original
// expanded to a bare `if`, which swallows a following `else` and breaks
// `if (c) KRML_CHECK_SIZE(...); else ...`. (size) is also parenthesized so
// compound expressions such as `a + b` are cast as a whole.
#define KRML_CHECK_SIZE(elt, size) \
  do { \
    if (((size_t)(size)) > SIZE_MAX / sizeof(elt)) { \
      printf("Maximum allocatable size exceeded, aborting before overflow at " \
             "%s:%d\n", \
             __FILE__, __LINE__); \
      exit(253); \
    } \
  } while (0)
+
// Stubs to make ST happy. Important note: you must generate a use of the macro
// argument, otherwise, you may have FStar_ST_recall(f) as the only use of f;
// KreMLin will think that this is a valid use, but then the C compiler, after
// macro expansion, will error out.
bool FStar_HyperStack_is_eternal_color(Prims_int x0);
// The heap/region model is purely specificational: reads and allocations
// expand to the constant 0, writes abort, recalls merely "use" their argument.
#define FStar_Monotonic_HyperHeap_root 0
#define FStar_HyperStack_ST_op_Colon_Equals(x, v) KRML_EXIT
#define FStar_HyperStack_ST_op_Bang(x) 0
#define FStar_HyperStack_ST_salloc(x) 0
#define FStar_HyperStack_ST_ralloc(x, y) 0
#define FStar_HyperStack_ST_new_region(x) 0
#define FStar_HyperStack_ST_recall(x) \
  do { \
    (void)(x); \
  } while (0)
#define FStar_HyperStack_ST_recall_region(x) \
  do { \
    (void)(x); \
  } while (0)

#define FStar_Monotonic_RRef_m_recall(x1, x2) \
  do { \
    (void)(x1); \
    (void)(x2); \
  } while (0)
#define FStar_Monotonic_RRef_m_write(x1, x2, x3, x4, x5) \
  do { \
    (void)(x1); \
    (void)(x2); \
    (void)(x3); \
    (void)(x4); \
    (void)(x5); \
  } while (0)
// Variadic so it matches any call arity; expands to a brace initializer.
#define FStar_Monotonic_RRef_m_alloc(...) \
  { 0 }

#define FStar_HyperHeap_root 0

// Misc; many of these are polymorphic, hence not extracted (yet) by Kremlin,
// which means that a macro is the "right" way to make sure they don't generate
// a compilation error.
#define FStar_Pervasives_Native_fst(x) (x).fst
#define FStar_Pervasives_Native_snd(x) (x).snd
// Sequence operations only occur in ghost code; dummy 0 definitions suffice.
#define FStar_Seq_Base_createEmpty(x) 0
#define FStar_Seq_Base_create(len, init) 0
#define FStar_Seq_Base_upd(s, i, e) 0
#define FStar_Seq_Base_eq(l1, l2) 0
FStar_Seq_Base_seq FStar_Seq_Base_append(FStar_Seq_Base_seq x,
                                         FStar_Seq_Base_seq y);
FStar_Seq_Base_seq FStar_Seq_Base_slice(FStar_Seq_Base_seq x,
                                        FStar_Seq_Base_seq y, Prims_nat z);
#define FStar_Seq_Properties_snoc(x, y) 0
#define FStar_Seq_Properties_cons(x, y) 0
#define FStar_Seq_Base_index(x, y) 0
+
+// Endian-ness
+
+// ... for Linux
+#if defined(__linux__) || defined(__CYGWIN__)
+#include
+
+// ... for OSX
+#elif defined(__APPLE__)
+#include
+#define htole64(x) OSSwapHostToLittleInt64(x)
+#define le64toh(x) OSSwapLittleToHostInt64(x)
+#define htobe64(x) OSSwapHostToBigInt64(x)
+#define be64toh(x) OSSwapBigToHostInt64(x)
+
+#define htole16(x) OSSwapHostToLittleInt16(x)
+#define le16toh(x) OSSwapLittleToHostInt16(x)
+#define htobe16(x) OSSwapHostToBigInt16(x)
+#define be16toh(x) OSSwapBigToHostInt16(x)
+
+#define htole32(x) OSSwapHostToLittleInt32(x)
+#define le32toh(x) OSSwapLittleToHostInt32(x)
+#define htobe32(x) OSSwapHostToBigInt32(x)
+#define be32toh(x) OSSwapBigToHostInt32(x)
+
+// ... for Windows
+#elif (defined(_WIN16) || defined(_WIN32) || defined(_WIN64)) && \
+ !defined(__WINDOWS__)
+#include
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+
+#if defined(_MSC_VER)
+#include
+#define htobe16(x) _byteswap_ushort(x)
+#define htole16(x) (x)
+#define be16toh(x) _byteswap_ushort(x)
+#define le16toh(x) (x)
+
+#define htobe32(x) _byteswap_ulong(x)
+#define htole32(x) (x)
+#define be32toh(x) _byteswap_ulong(x)
+#define le32toh(x) (x)
+
+#define htobe64(x) _byteswap_uint64(x)
+#define htole64(x) (x)
+#define be64toh(x) _byteswap_uint64(x)
+#define le64toh(x) (x)
+
+#elif defined(__GNUC__) || defined(__clang__)
+
+#define htobe16(x) __builtin_bswap16(x)
+#define htole16(x) (x)
+#define be16toh(x) __builtin_bswap16(x)
+#define le16toh(x) (x)
+
+#define htobe32(x) __builtin_bswap32(x)
+#define htole32(x) (x)
+#define be32toh(x) __builtin_bswap32(x)
+#define le32toh(x) (x)
+
+#define htobe64(x) __builtin_bswap64(x)
+#define htole64(x) (x)
+#define be64toh(x) __builtin_bswap64(x)
+#define le64toh(x) (x)
+#endif
+
+#elif BYTE_ORDER == BIG_ENDIAN
+
+/* that would be xbox 360 */
+#define htobe16(x) (x)
+#define htole16(x) __builtin_bswap16(x)
+#define be16toh(x) (x)
+#define le16toh(x) __builtin_bswap16(x)
+
+#define htobe32(x) (x)
+#define htole32(x) __builtin_bswap32(x)
+#define be32toh(x) (x)
+#define le32toh(x) __builtin_bswap32(x)
+
+#define htobe64(x) (x)
+#define htole64(x) __builtin_bswap64(x)
+#define be64toh(x) (x)
+#define le64toh(x) __builtin_bswap64(x)
+
+#endif /* Windows */
+
+#else /* bare metal */
+
+#define __TESTLIB_H // don't include testlib.h for the build
+#define KREMLIB_EMBEDDED_TARGET // mark the use of a bare-metal target
+
+// define empty functions for printf() and exit() because they are present
+// in the autogenerated code
+#define printf
+#define exit(_x)
+
+#undef KRML_EXIT
+#define KRML_EXIT
+
+#undef KRML_EABORT
+#define KRML_EABORT(t, msg)
+
+#undef KRML_CHECK_SIZE
+#define KRML_CHECK_SIZE(elt, size)
+
+// byte order is typically defined in arm-none-eabi/include/machine/endian.h
+#if BYTE_ORDER == BIG_ENDIAN
+
+#define htobe32(x) (x)
+#define be32toh(x) (x)
+#define htole32(x) \
+ (__extension__ ({ \
+ uint32_t _temp = (x); \
+ ((_temp >> 24) & 0x000000FF) | \
+ ((_temp >> 8) & 0x0000FF00) | \
+ ((_temp << 8) & 0x00FF0000) | \
+ ((_temp << 24) & 0xFF000000); \
+ }))
+#define le32toh(x) (htole32((x)))
+
+#define htobe64(x) (x)
+#define be64toh(x) (x)
+#define htole64(x) \
+ (__extension__ ({ \
+ uint64_t __temp = (x); \
+ uint32_t __low = htobe32((uint32_t)__temp); \
+ uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \
+ (((uint64_t)__low) << 32) | __high; \
+ }))
+#define le64toh(x) (htole64((x)))
+
+#else /* assume little endian hardware (should be default for bare-metal) */
+
+#define htole32(x) (x)
+#define le32toh(x) (x)
+#define htobe32(x) \
+ (__extension__ ({ \
+ uint32_t _temp = (x); \
+ ((_temp >> 24) & 0x000000FF) | \
+ ((_temp >> 8) & 0x0000FF00) | \
+ ((_temp << 8) & 0x00FF0000) | \
+ ((_temp << 24) & 0xFF000000); \
+ }))
+#define be32toh(x) (htobe32((x)))
+
+#define htole64(x) (x)
+#define le64toh(x) (x)
+#define htobe64(x) \
+ (__extension__ ({ \
+ uint64_t __temp = (x); \
+ uint32_t __low = htobe32((uint32_t)__temp); \
+ uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \
+ (((uint64_t)__low) << 32) | __high; \
+ }))
+#define be64toh(x) (htobe64((x)))
+
+#endif /* BYTE_ORDER == BIG_ENDIAN */
+
+#endif // platform specific endiannes
+
+
+
+
+
// Loads and stores. Routing every access through memcpy avoids undefined
// behavior from unaligned or type-punned reads/writes; compilers lower each
// call to a single move on targets that permit unaligned access.

inline static uint16_t load16(uint8_t *src) {
  uint16_t value;
  memcpy(&value, src, sizeof value);
  return value;
}

inline static uint32_t load32(uint8_t *src) {
  uint32_t value;
  memcpy(&value, src, sizeof value);
  return value;
}

inline static uint64_t load64(uint8_t *src) {
  uint64_t value;
  memcpy(&value, src, sizeof value);
  return value;
}

inline static void store16(uint8_t *dst, uint16_t value) {
  memcpy(dst, &value, sizeof value);
}

inline static void store32(uint8_t *dst, uint32_t value) {
  memcpy(dst, &value, sizeof value);
}

inline static void store64(uint8_t *dst, uint64_t value) {
  memcpy(dst, &value, sizeof value);
}


// Endianness-aware accessors: raw load/store combined with the platform
// byte-order conversion macros defined above.
#define load16_le(b) (le16toh(load16(b)))
#define store16_le(b, i) (store16(b, htole16(i)))
#define load16_be(b) (be16toh(load16(b)))
#define store16_be(b, i) (store16(b, htobe16(i)))

#define load32_le(b) (le32toh(load32(b)))
#define store32_le(b, i) (store32(b, htole32(i)))
#define load32_be(b) (be32toh(load32(b)))
#define store32_be(b, i) (store32(b, htobe32(i)))

#define load64_le(b) (le64toh(load64(b)))
#define store64_le(b, i) (store64(b, htole64(i)))
#define load64_be(b) (be64toh(load64(b)))
#define store64_be(b, i) (store64(b, htobe64(i)))
+
+
+// Integer types
+typedef uint64_t FStar_UInt64_t, FStar_UInt64_t_;
+typedef int64_t FStar_Int64_t, FStar_Int64_t_;
+typedef uint32_t FStar_UInt32_t, FStar_UInt32_t_;
+typedef int32_t FStar_Int32_t, FStar_Int32_t_;
+typedef uint16_t FStar_UInt16_t, FStar_UInt16_t_;
+typedef int16_t FStar_Int16_t, FStar_Int16_t_;
+typedef uint8_t FStar_UInt8_t, FStar_UInt8_t_;
+typedef int8_t FStar_Int8_t, FStar_Int8_t_;
+
+// Random functions that may show up.
+Prims_int FStar_UInt32_v(uint32_t x);
+FStar_UInt32_t FStar_UInt32_uint_to_t(Prims_nat x);
+
// 32-bit rotations, valid for 0 <= n < 32. The complement shift amount is
// written as `-n & 31`, which maps n == 0 to 0, so neither operand is ever
// shifted by the full word width (which would be undefined behavior).
static inline uint32_t rotate32_left(uint32_t x, uint32_t n) {
  uint32_t hi = x << n;
  uint32_t lo = x >> (-n & 31);
  return hi | lo;
}
static inline uint32_t rotate32_right(uint32_t x, uint32_t n) {
  uint32_t lo = x >> n;
  uint32_t hi = x << (-n & 31);
  return lo | hi;
}
+
// Constant time comparisons
//
// eq_mask returns an all-ones mask when x == y and all-zeros otherwise,
// without data-dependent branches: ~(x ^ y) is all-ones exactly on equality,
// the `x &= x << k` cascade ANDs every bit into the sign bit, and the final
// arithmetic right shift broadcasts that bit across the word.
// NOTE(review): the signed right shifts rely on arithmetic-shift behavior,
// which is implementation-defined in C but provided by gcc/clang/msvc.
static inline uint8_t FStar_UInt8_eq_mask(uint8_t x, uint8_t y) {
  x = ~(x ^ y);
  x &= x << 4;
  x &= x << 2;
  x &= x << 1;
  return (int8_t)x >> 7;
}

// gte_mask: subtract in a wider signed type; the sign bit of the difference
// is set exactly when x < y, so inverting its broadcast gives the >= mask.
static inline uint8_t FStar_UInt8_gte_mask(uint8_t x, uint8_t y) {
  return ~(uint8_t)(((int32_t)x - y) >> 31);
}

static inline uint16_t FStar_UInt16_eq_mask(uint16_t x, uint16_t y) {
  x = ~(x ^ y);
  x &= x << 8;
  x &= x << 4;
  x &= x << 2;
  x &= x << 1;
  return (int16_t)x >> 15;
}

static inline uint16_t FStar_UInt16_gte_mask(uint16_t x, uint16_t y) {
  return ~(uint16_t)(((int32_t)x - y) >> 31);
}

static inline uint32_t FStar_UInt32_eq_mask(uint32_t x, uint32_t y) {
  x = ~(x ^ y);
  x &= x << 16;
  x &= x << 8;
  x &= x << 4;
  x &= x << 2;
  x &= x << 1;
  return ((int32_t)x) >> 31;
}

// Widening to int64 keeps the subtraction exact for all 32-bit inputs.
static inline uint32_t FStar_UInt32_gte_mask(uint32_t x, uint32_t y) {
  return ~((uint32_t)(((int64_t)x - y) >> 63));
}

static inline uint64_t FStar_UInt64_eq_mask(uint64_t x, uint64_t y) {
  x = ~(x ^ y);
  x &= x << 32;
  x &= x << 16;
  x &= x << 8;
  x &= x << 4;
  x &= x << 2;
  x &= x << 1;
  return ((int64_t)x) >> 63;
}
+
// Constant-time unsigned >= on 64 bits: all-ones when x >= y, zero otherwise.
//
// Fix: the original split the operands into low-63-bit and top-bit halves and
// subtracted each as int64_t. But (int64_t)(x & 0x8000000000000000) is
// INT64_MIN, so the top-bit comparison had an inverted sign —
// gte_mask(2^63, 0) returned 0 instead of all-ones — and the case
// 0 - INT64_MIN is signed overflow (undefined behavior). This is the standard
// branch-free borrow formulation adopted by later kremlib/karamel releases:
// the top bit of x ^ ((x ^ y) | ((x - y) ^ y)) is set exactly when x < y.
static inline uint64_t FStar_UInt64_gte_mask(uint64_t x, uint64_t y) {
  uint64_t x_xor_y = x ^ y;
  uint64_t x_sub_y = x - y;
  uint64_t x_sub_y_xor_y = x_sub_y ^ y;
  uint64_t q = x_xor_y | x_sub_y_xor_y;
  uint64_t x_xor_q = x ^ q;
  uint64_t x_lt_y = x_xor_q >> 63; // 1 when x < y, else 0
  return x_lt_y - 1;               // 1-1 = 0 ; 0-1 = all-ones
}
+
// Platform-specific 128-bit arithmetic. These are static functions in a header,
// so that each translation unit gets its own copy and the C compiler can
// optimize.
#ifndef KRML_NOUINT128
typedef unsigned __int128 FStar_UInt128_t, FStar_UInt128_t_, uint128_t;

static inline void print128(unsigned char *where, uint128_t n) {
  printf("%s: [%" PRIu64 ",%" PRIu64 "]\n", where, (uint64_t)(n >> 64),
         (uint64_t)n);
}

static inline uint128_t load128_le(uint8_t *b) {
  uint128_t l = (uint128_t)load64_le(b);
  uint128_t h = (uint128_t)load64_le(b + 8);
  return (h << 64 | l);
}

static inline void store128_le(uint8_t *b, uint128_t n) {
  store64_le(b, (uint64_t)n);
  store64_le(b + 8, (uint64_t)(n >> 64));
}

static inline uint128_t load128_be(uint8_t *b) {
  uint128_t h = (uint128_t)load64_be(b);
  uint128_t l = (uint128_t)load64_be(b + 8);
  return (h << 64 | l);
}

static inline void store128_be(uint8_t *b, uint128_t n) {
  store64_be(b, (uint64_t)(n >> 64));
  store64_be(b + 8, (uint64_t)n);
}

#define FStar_UInt128_add(x, y) ((x) + (y))
#define FStar_UInt128_mul(x, y) ((x) * (y))
#define FStar_UInt128_add_mod(x, y) ((x) + (y))
#define FStar_UInt128_sub(x, y) ((x) - (y))
#define FStar_UInt128_sub_mod(x, y) ((x) - (y))
#define FStar_UInt128_logand(x, y) ((x) & (y))
#define FStar_UInt128_logor(x, y) ((x) | (y))
#define FStar_UInt128_logxor(x, y) ((x) ^ (y))
#define FStar_UInt128_lognot(x) (~(x))
#define FStar_UInt128_shift_left(x, y) ((x) << (y))
#define FStar_UInt128_shift_right(x, y) ((x) >> (y))
#define FStar_UInt128_uint64_to_uint128(x) ((uint128_t)(x))
#define FStar_UInt128_uint128_to_uint64(x) ((uint64_t)(x))
#define FStar_Int_Cast_Full_uint64_to_uint128(x) ((uint128_t)(x))
#define FStar_Int_Cast_Full_uint128_to_uint64(x) ((uint64_t)(x))
// Fix: widen through the UNSIGNED 128-bit type. The original cast to signed
// __int128, and (2^64-1) * (2^64-1) exceeds INT128_MAX, so the multiplication
// could overflow a signed type — undefined behavior.
#define FStar_UInt128_mul_wide(x, y) ((uint128_t)(x) * (y))
#define FStar_UInt128_op_Hat_Hat(x, y) ((x) ^ (y))

static inline uint128_t FStar_UInt128_eq_mask(uint128_t x, uint128_t y) {
  uint64_t mask =
      FStar_UInt64_eq_mask((uint64_t)(x >> 64), (uint64_t)(y >> 64)) &
      FStar_UInt64_eq_mask(x, y);
  return ((uint128_t)mask) << 64 | mask;
}

static inline uint128_t FStar_UInt128_gte_mask(uint128_t x, uint128_t y) {
  uint64_t mask =
      (FStar_UInt64_gte_mask(x >> 64, y >> 64) &
       ~(FStar_UInt64_eq_mask(x >> 64, y >> 64))) |
      (FStar_UInt64_eq_mask(x >> 64, y >> 64) & FStar_UInt64_gte_mask(x, y));
  return ((uint128_t)mask) << 64 | mask;
}

#else // KRML_NOUINT128 defined: emulate uint128 with a two-field struct

// This is a bad circular dependency... should fix it properly.
#include "FStar.h"

typedef FStar_UInt128_uint128 FStar_UInt128_t_, uint128_t;

// A series of definitions written using pointers.
#ifdef KREMLIB_EMBEDDED_TARGET
static inline void print128_(unsigned char *where __attribute__((unused)), uint128_t *n __attribute__((unused))) {}
#else
static inline void print128_(unsigned char *where, uint128_t *n) {
  printf("%s: [%" PRIu64 ",%" PRIu64 "]\n", where, n->high, n->low);
}
#endif


static inline void load128_le_(uint8_t *b, uint128_t *r) {
  r->low = load64_le(b);
  r->high = load64_le(b + 8);
}

static inline void store128_le_(uint8_t *b, uint128_t *n) {
  store64_le(b, n->low);
  store64_le(b + 8, n->high);
}

static inline void load128_be_(uint8_t *b, uint128_t *r) {
  r->high = load64_be(b);
  r->low = load64_be(b + 8);
}

static inline void store128_be_(uint8_t *b, uint128_t *n) {
  store64_be(b, n->high);
  store64_be(b + 8, n->low);
}

#ifndef KRML_NOSTRUCT_PASSING

static inline void print128(unsigned char *where, uint128_t n) {
  print128_(where, &n);
}

static inline uint128_t load128_le(uint8_t *b) {
  uint128_t r;
  load128_le_(b, &r);
  return r;
}

static inline void store128_le(uint8_t *b, uint128_t n) { store128_le_(b, &n); }

static inline uint128_t load128_be(uint8_t *b) {
  uint128_t r;
  load128_be_(b, &r);
  return r;
}

static inline void store128_be(uint8_t *b, uint128_t n) { store128_be_(b, &n); }

#else // KRML_NOSTRUCT_PASSING defined: only the pointer-based variants exist

#define print128 print128_
#define load128_le load128_le_
#define store128_le store128_le_
#define load128_be load128_be_
#define store128_be store128_be_

#endif // KRML_NOSTRUCT_PASSING
#endif // KRML_NOUINT128
+#endif // __KREMLIB_H
diff --git a/sw/airborne/modules/datalink/hacl-c/testlib.c b/sw/airborne/modules/datalink/hacl-c/testlib.c
new file mode 100644
index 0000000000..8bd14b6a1c
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/testlib.c
@@ -0,0 +1,59 @@
+#include "testlib.h"
+
/* Render `reference` and `output` (each `size` bytes) as hex, print both,
 * and abort the process with EXIT_FAILURE if they differ. */
void TestLib_compare_and_print(const char *txt, uint8_t *reference,
                               uint8_t *output, int size) {
  char *str = malloc(2 * (size_t)size + 1);
  char *str_ref = malloc(2 * (size_t)size + 1);
  /* Fix: the original dereferenced the malloc results without checking —
   * undefined behavior on allocation failure. */
  if (str == NULL || str_ref == NULL) {
    fprintf(stderr, "[test] allocation failure in compare_and_print\n");
    exit(EXIT_FAILURE);
  }
  for (int i = 0; i < size; ++i) {
    sprintf(str + 2 * i, "%02x", output[i]);
    sprintf(str_ref + 2 * i, "%02x", reference[i]);
  }
  str[2 * size] = '\0';
  str_ref[2 * size] = '\0';
  printf("[test] expected output %s is %s\n", txt, str_ref);
  printf("[test] computed output %s is %s\n", txt, str);

  for (int i = 0; i < size; ++i) {
    if (output[i] != reference[i]) {
      fprintf(stderr, "[test] reference %s and expected %s differ at byte %d\n",
              txt, txt, i);
      exit(EXIT_FAILURE);
    }
  }

  printf("[test] %s is a success\n", txt);

  free(str);
  free(str_ref);
}
+
+void TestLib_touch(int32_t x) {}
+
/* Assert-style check: returns silently when x == y, otherwise reports the
 * mismatch and exits the test process with status 253. */
void TestLib_check(int32_t x, int32_t y) {
  if (x == y)
    return;
  printf("Test check failure: %" PRId32 " != %" PRId32 "\n", x, y);
  exit(253);
}
+
/* malloc wrapper that treats allocation failure as fatal: either returns a
 * non-NULL block or terminates the test process. */
void *TestLib_unsafe_malloc(size_t size) {
  void *blob = malloc(size);
  if (blob != NULL)
    return blob;
  printf(" WARNING : malloc failed in tests !\n");
  exit(EXIT_FAILURE);
}
+
/* Print the elapsed time between two clock() samples, in seconds. */
void TestLib_print_clock_diff(clock_t t1, clock_t t2) {
  double seconds = ((double)t2 - t1) / CLOCKS_PER_SEC;
  printf("User time: %f\n", seconds);
}
+
/* Report a numeric error code produced by a test. */
void TestLib_perr(unsigned int err_code) {
  fprintf(stdout, "Got error code %u.\n", err_code);
}
+
+void TestLib_print_cycles_per_round(TestLib_cycles c1, TestLib_cycles c2, uint32_t rounds) {
+ printf("[perf] cpu cycles per round (averaged over %d) is %f\n", rounds,
+ (float)(c2 - c1) / rounds);
+}
diff --git a/sw/airborne/modules/datalink/hacl-c/testlib.h b/sw/airborne/modules/datalink/hacl-c/testlib.h
new file mode 100644
index 0000000000..95d632e0ed
--- /dev/null
+++ b/sw/airborne/modules/datalink/hacl-c/testlib.h
@@ -0,0 +1,79 @@
+#ifndef __TESTLIB_H
+#define __TESTLIB_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// This file has a hand-written .h file so that test files written in C (e.g.
+// main-Poly1305.c) can use the functions from this file too (e.g.
+// [compare_and_print]).
+
+// Functions for F*-written tests, exposed via TestLib.fsti
+void TestLib_touch(int32_t);
+void TestLib_check(int32_t, int32_t);
+
+// These functions are also called by HACL
+void TestLib_compare_and_print(const char *txt, uint8_t *reference,
+ uint8_t *output, int size);
+
+void *TestLib_unsafe_malloc(size_t size);
+void TestLib_print_clock_diff(clock_t t1, clock_t t2);
+void TestLib_perr(unsigned int err_code);
+
+#define TestLib_uint8_p_null NULL
+#define TestLib_uint32_p_null NULL
+#define TestLib_uint64_p_null NULL
+
// Cycle counters for benchmarking; wide enough for a raw x86 time-stamp
// counter value.
typedef unsigned long long TestLib_cycles, cycles;

#if defined(__COMPCERT__)
// CompCert has no inline assembly: stub all three counters out to 0.
static __inline__ TestLib_cycles TestLib_cpucycles(void) {
  return 0;
}

static __inline__ TestLib_cycles TestLib_cpucycles_begin(void)
{
  return 0;
}

static __inline__ TestLib_cycles TestLib_cpucycles_end(void)
{
  return 0;
}

#else

// NOTE(review): x86/x86-64 only (RDTSC/RDTSCP); no fallback is provided for
// _MSC_VER, leaving MSVC builds without these definitions — confirm intended.
#ifndef _MSC_VER
// Plain RDTSC: fast, but unserialized read of the time-stamp counter.
static __inline__ TestLib_cycles TestLib_cpucycles(void) {
  unsigned hi, lo;
  __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
  return ((unsigned long long)lo) | (((unsigned long long)hi) << 32);
}

// CPUID before RDTSC serializes the pipeline so earlier instructions cannot
// drift into the measured region (Intel's recommended benchmarking idiom).
static __inline__ TestLib_cycles TestLib_cpucycles_begin(void)
{
  unsigned hi, lo;
  __asm__ __volatile__ ("CPUID\n\t" "RDTSC\n\t" "mov %%edx, %0\n\t" "mov %%eax, %1\n\t": "=r" (hi), "=r" (lo):: "%rax", "%rbx", "%rcx", "%rdx");
  return ( (uint64_t)lo)|( ((uint64_t)hi)<<32 );
}

// RDTSCP waits for prior instructions to retire before reading the counter;
// the trailing CPUID keeps later instructions from starting early.
static __inline__ TestLib_cycles TestLib_cpucycles_end(void)
{
  unsigned hi, lo;
  __asm__ __volatile__ ("RDTSCP\n\t" "mov %%edx, %0\n\t" "mov %%eax, %1\n\t" "CPUID\n\t": "=r" (hi), "=r" (lo):: "%rax", "%rbx", "%rcx", "%rdx");
  return ( (uint64_t)lo)|( ((uint64_t)hi)<<32 );
}
#endif
#endif
+
+void TestLib_print_cycles_per_round(TestLib_cycles c1, TestLib_cycles c2, uint32_t rounds);
+
+
+#endif
diff --git a/sw/airborne/modules/datalink/spprz_dl.c b/sw/airborne/modules/datalink/spprz_dl.c
new file mode 100644
index 0000000000..21c8848c74
--- /dev/null
+++ b/sw/airborne/modules/datalink/spprz_dl.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 Michal Podhradsky
+ *
+ * This file is part of paparazzi.
+ *
+ * paparazzi is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * paparazzi is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with paparazzi; see the file COPYING. If not, see
 * <http://www.gnu.org/licenses/>.
+ *
+ */
+
+/** \file modules/datalink/spprz_dl.c
+ * \brief Datalink using secure PPRZ protocol
+ */
+
+#include "modules/datalink/spprz_dl.h"
+#include "subsystems/datalink/datalink.h"
+
+struct spprz_transport spprz_tp;
+
/* Initialize the secure PPRZ transport state (spprz_tp). Wired in as the
 * module's init function via telemetry_transparent_secure.xml. */
void spprz_dl_init(void)
{
  spprz_transport_init(&spprz_tp);
}
+
/* Datalink event: feed bytes from the downlink device into the secure
 * transport parser, then run the standard datalink dispatch.
 * NOTE(review): both calls share dl_buffer and dl_msg_available — the parse
 * step presumably sets dl_msg_available for DlCheckAndParse to consume, so
 * the call order matters; confirm against pprzlink's secure transport. */
void spprz_dl_event(void)
{
  spprz_check_and_parse(&DOWNLINK_DEVICE.device, &spprz_tp, dl_buffer, &dl_msg_available);
  DlCheckAndParse(&DOWNLINK_DEVICE.device, &spprz_tp.trans_tx, dl_buffer, &dl_msg_available);
}
diff --git a/sw/airborne/modules/datalink/spprz_dl.h b/sw/airborne/modules/datalink/spprz_dl.h
new file mode 100644
index 0000000000..017fd19f09
--- /dev/null
+++ b/sw/airborne/modules/datalink/spprz_dl.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 Michal Podhradsky
+ *
+ * This file is part of paparazzi.
+ *
+ * paparazzi is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * paparazzi is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with paparazzi; see the file COPYING. If not, see
 * <http://www.gnu.org/licenses/>.
+ *
+ */
+
+/** \file modules/datalink/spprz_dl.h
+ * \brief Datalink using secure PPRZ protocol
+ */
+
+#ifndef SPPRZ_DL_H
+#define SPPRZ_DL_H
+
+#include "pprzlink/secure_pprz_transport.h"
+
+#include "mcu_periph/uart.h"
+#if USE_USB_SERIAL
+#include "mcu_periph/usb_serial.h"
+#endif
+#if USE_UDP
+#include "mcu_periph/udp.h"
+#endif
+
+/** PPRZ transport structure */
+extern struct spprz_transport spprz_tp;
+
+/** Init function */
+extern void spprz_dl_init(void);
+
+/** Datalink Event */
+extern void spprz_dl_event(void);
+
+#endif /* SPPRZ_DL_H */
+