diff --git a/include/EverCrypt_Hash.h b/include/EverCrypt_Hash.h
index 6791dc27..431b1375 100644
--- a/include/EverCrypt_Hash.h
+++ b/include/EverCrypt_Hash.h
@@ -121,29 +121,29 @@ EverCrypt_Hash_Incremental_hash(
   uint32_t len
 );
 
-#define MD5_HASH_LEN ((uint32_t)16U)
+#define MD5_HASH_LEN (16U)
 
-#define SHA1_HASH_LEN ((uint32_t)20U)
+#define SHA1_HASH_LEN (20U)
 
-#define SHA2_224_HASH_LEN ((uint32_t)28U)
+#define SHA2_224_HASH_LEN (28U)
 
-#define SHA2_256_HASH_LEN ((uint32_t)32U)
+#define SHA2_256_HASH_LEN (32U)
 
-#define SHA2_384_HASH_LEN ((uint32_t)48U)
+#define SHA2_384_HASH_LEN (48U)
 
-#define SHA2_512_HASH_LEN ((uint32_t)64U)
+#define SHA2_512_HASH_LEN (64U)
 
-#define SHA3_224_HASH_LEN ((uint32_t)28U)
+#define SHA3_224_HASH_LEN (28U)
 
-#define SHA3_256_HASH_LEN ((uint32_t)32U)
+#define SHA3_256_HASH_LEN (32U)
 
-#define SHA3_384_HASH_LEN ((uint32_t)48U)
+#define SHA3_384_HASH_LEN (48U)
 
-#define SHA3_512_HASH_LEN ((uint32_t)64U)
+#define SHA3_512_HASH_LEN (64U)
 
-#define BLAKE2S_HASH_LEN ((uint32_t)32U)
+#define BLAKE2S_HASH_LEN (32U)
 
-#define BLAKE2B_HASH_LEN ((uint32_t)64U)
+#define BLAKE2B_HASH_LEN (64U)
 
 #if defined(__cplusplus)
 }
diff --git a/include/Hacl_IntTypes_Intrinsics.h b/include/Hacl_IntTypes_Intrinsics.h
index e2a193e9..c816b046 100644
--- a/include/Hacl_IntTypes_Intrinsics.h
+++ b/include/Hacl_IntTypes_Intrinsics.h
@@ -41,7 +41,7 @@ static inline uint32_t
 Hacl_IntTypes_Intrinsics_add_carry_u32(uint32_t cin, uint32_t x, uint32_t y, uint32_t *r)
 {
   uint64_t res = (uint64_t)x + (uint64_t)cin + (uint64_t)y;
-  uint32_t c = (uint32_t)(res >> (uint32_t)32U);
+  uint32_t c = (uint32_t)(res >> 32U);
   r[0U] = (uint32_t)res;
   return c;
 }
@@ -50,7 +50,7 @@ static inline uint32_t
 Hacl_IntTypes_Intrinsics_sub_borrow_u32(uint32_t cin, uint32_t x, uint32_t y, uint32_t *r)
 {
   uint64_t res = (uint64_t)x - (uint64_t)y - (uint64_t)cin;
-  uint32_t c = (uint32_t)(res >> (uint32_t)32U) & (uint32_t)1U;
+  uint32_t c = (uint32_t)(res >> 32U) & 1U;
   r[0U] = (uint32_t)res;
   return c;
 }
@@ -59,8 +59,7 @@ static inline uint64_t
 Hacl_IntTypes_Intrinsics_add_carry_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r)
 {
   uint64_t res = x + cin + y;
-  uint64_t
-  c = (~FStar_UInt64_gte_mask(res, x) | (FStar_UInt64_eq_mask(res, x) & cin)) & (uint64_t)1U;
+  uint64_t c = (~FStar_UInt64_gte_mask(res, x) | (FStar_UInt64_eq_mask(res, x) & cin)) & 1ULL;
   r[0U] = res;
   return c;
 }
@@ -73,7 +72,7 @@ Hacl_IntTypes_Intrinsics_sub_borrow_u64(uint64_t cin, uint64_t x, uint64_t y, ui
   c =
     ((FStar_UInt64_gte_mask(res, x) & ~FStar_UInt64_eq_mask(res, x))
     | (FStar_UInt64_eq_mask(res, x) & cin))
-    & (uint64_t)1U;
+    & 1ULL;
   r[0U] = res;
   return c;
 }
diff --git a/include/Hacl_IntTypes_Intrinsics_128.h b/include/Hacl_IntTypes_Intrinsics_128.h
index aa843a6c..d3008969 100644
--- a/include/Hacl_IntTypes_Intrinsics_128.h
+++ b/include/Hacl_IntTypes_Intrinsics_128.h
@@ -45,7 +45,7 @@ Hacl_IntTypes_Intrinsics_128_add_carry_u64(uint64_t cin, uint64_t x, uint64_t y,
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_uint64_to_uint128(x),
         FStar_UInt128_uint64_to_uint128(cin)),
       FStar_UInt128_uint64_to_uint128(y));
-  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
+  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U));
   r[0U] = FStar_UInt128_uint128_to_uint64(res);
   return c;
 }
@@ -58,10 +58,7 @@ Hacl_IntTypes_Intrinsics_128_sub_borrow_u64(uint64_t cin, uint64_t x, uint64_t y
     FStar_UInt128_sub_mod(FStar_UInt128_sub_mod(FStar_UInt128_uint64_to_uint128(x),
         FStar_UInt128_uint64_to_uint128(y)),
       FStar_UInt128_uint64_to_uint128(cin));
-  uint64_t
-  c =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U))
-    & (uint64_t)1U;
+  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U)) & 1ULL;
   r[0U] = FStar_UInt128_uint128_to_uint64(res);
   return c;
 }
diff --git a/include/internal/Hacl_Bignum.h b/include/internal/Hacl_Bignum.h
index 901a8dad..4b31236d 100644
--- a/include/internal/Hacl_Bignum.h
+++ b/include/internal/Hacl_Bignum.h
@@ -124,15 +124,6 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(
   uint32_t *res
 );
 
-void
-Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
-  uint32_t len,
-  uint32_t *n,
-  uint32_t nInv,
-  uint32_t *c,
-  uint32_t *res
-);
-
 void
 Hacl_Bignum_Montgomery_bn_to_mont_u32(
   uint32_t len,
@@ -181,15 +172,6 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(
   uint64_t *res
 );
 
-void
-Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
-  uint32_t len,
-  uint64_t *n,
-  uint64_t nInv,
-  uint64_t *c,
-  uint64_t *res
-);
-
 void
 Hacl_Bignum_Montgomery_bn_to_mont_u64(
   uint32_t len,
@@ -228,6 +210,24 @@ Hacl_Bignum_Montgomery_bn_mont_sqr_u64(
   uint64_t *resM
 );
 
+void
+Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32(
+  uint32_t len,
+  uint32_t *n,
+  uint32_t nInv,
+  uint32_t *c,
+  uint32_t *res
+);
+
+void
+Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64(
+  uint32_t len,
+  uint64_t *n,
+  uint64_t nInv,
+  uint64_t *c,
+  uint64_t *res
+);
+
 uint32_t
 Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(
   uint32_t len,
diff --git a/include/internal/Hacl_Bignum25519_51.h b/include/internal/Hacl_Bignum25519_51.h
index 25a10503..4678f8a0 100644
--- a/include/internal/Hacl_Bignum25519_51.h
+++ b/include/internal/Hacl_Bignum25519_51.h
@@ -69,11 +69,11 @@ static inline void Hacl_Impl_Curve25519_Field51_fsub(uint64_t *out, uint64_t *f1
   uint64_t f23 = f2[3U];
   uint64_t f14 = f1[4U];
   uint64_t f24 = f2[4U];
-  out[0U] = f10 + (uint64_t)0x3fffffffffff68U - f20;
-  out[1U] = f11 + (uint64_t)0x3ffffffffffff8U - f21;
-  out[2U] = f12 + (uint64_t)0x3ffffffffffff8U - f22;
-  out[3U] = f13 + (uint64_t)0x3ffffffffffff8U - f23;
-  out[4U] = f14 + (uint64_t)0x3ffffffffffff8U - f24;
+  out[0U] = f10 + 0x3fffffffffff68ULL - f20;
+  out[1U] = f11 + 0x3ffffffffffff8ULL - f21;
+  out[2U] = f12 + 0x3ffffffffffff8ULL - f22;
+  out[3U] = f13 + 0x3ffffffffffff8ULL - f23;
+  out[4U] = f14 + 0x3ffffffffffff8ULL - f24;
 }
 
 static inline void
@@ -84,7 +84,7 @@ Hacl_Impl_Curve25519_Field51_fmul(
   FStar_UInt128_uint128 *uu___
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f10 = f1[0U];
   uint64_t f11 = f1[1U];
   uint64_t f12 = f1[2U];
@@ -95,10 +95,10 @@ Hacl_Impl_Curve25519_Field51_fmul(
   uint64_t f22 = f2[2U];
   uint64_t f23 = f2[3U];
   uint64_t f24 = f2[4U];
-  uint64_t tmp1 = f21 * (uint64_t)19U;
-  uint64_t tmp2 = f22 * (uint64_t)19U;
-  uint64_t tmp3 = f23 * (uint64_t)19U;
-  uint64_t tmp4 = f24 * (uint64_t)19U;
+  uint64_t tmp1 = f21 * 19ULL;
+  uint64_t tmp2 = f22 * 19ULL;
+  uint64_t tmp3 = f23 * 19ULL;
+  uint64_t tmp4 = f24 * 19ULL;
   FStar_UInt128_uint128 o00 = FStar_UInt128_mul_wide(f10, f20);
   FStar_UInt128_uint128 o10 = FStar_UInt128_mul_wide(f10, f21);
   FStar_UInt128_uint128 o20 = FStar_UInt128_mul_wide(f10, f22);
@@ -129,25 +129,24 @@ Hacl_Impl_Curve25519_Field51_fmul(
   FStar_UInt128_uint128 tmp_w2 = o24;
   FStar_UInt128_uint128 tmp_w3 = o34;
   FStar_UInt128_uint128 tmp_w4 = o44;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp01 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp01 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w1, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp11 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp11 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w2, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp21 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp21 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w3, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp31 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp31 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w4, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp41 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp01 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp41 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp01 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t o0 = tmp0_;
   uint64_t o1 = tmp11 + c5;
   uint64_t o2 = tmp21;
@@ -168,7 +167,7 @@ Hacl_Impl_Curve25519_Field51_fmul2(
   FStar_UInt128_uint128 *uu___
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f10 = f1[0U];
   uint64_t f11 = f1[1U];
   uint64_t f12 = f1[2U];
@@ -189,14 +188,14 @@ Hacl_Impl_Curve25519_Field51_fmul2(
   uint64_t f42 = f2[7U];
   uint64_t f43 = f2[8U];
   uint64_t f44 = f2[9U];
-  uint64_t tmp11 = f21 * (uint64_t)19U;
-  uint64_t tmp12 = f22 * (uint64_t)19U;
-  uint64_t tmp13 = f23 * (uint64_t)19U;
-  uint64_t tmp14 = f24 * (uint64_t)19U;
-  uint64_t tmp21 = f41 * (uint64_t)19U;
-  uint64_t tmp22 = f42 * (uint64_t)19U;
-  uint64_t tmp23 = f43 * (uint64_t)19U;
-  uint64_t tmp24 = f44 * (uint64_t)19U;
+  uint64_t tmp11 = f21 * 19ULL;
+  uint64_t tmp12 = f22 * 19ULL;
+  uint64_t tmp13 = f23 * 19ULL;
+  uint64_t tmp14 = f24 * 19ULL;
+  uint64_t tmp21 = f41 * 19ULL;
+  uint64_t tmp22 = f42 * 19ULL;
+  uint64_t tmp23 = f43 * 19ULL;
+  uint64_t tmp24 = f44 * 19ULL;
   FStar_UInt128_uint128 o00 = FStar_UInt128_mul_wide(f10, f20);
   FStar_UInt128_uint128 o15 = FStar_UInt128_mul_wide(f10, f21);
   FStar_UInt128_uint128 o25 = FStar_UInt128_mul_wide(f10, f22);
@@ -257,49 +256,47 @@ Hacl_Impl_Curve25519_Field51_fmul2(
   FStar_UInt128_uint128 tmp_w22 = o241;
   FStar_UInt128_uint128 tmp_w23 = o34;
   FStar_UInt128_uint128 tmp_w24 = o44;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(tmp_w10, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(tmp_w10, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w11, FStar_UInt128_uint64_to_uint128(c00));
-  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w12, FStar_UInt128_uint64_to_uint128(c10));
-  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w13, FStar_UInt128_uint64_to_uint128(c20));
-  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w14, FStar_UInt128_uint64_to_uint128(c30));
-  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp00 + c40 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c50 = l_4 >> (uint32_t)51U;
+  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp00 + c40 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c50 = l_4 >> 51U;
   uint64_t o100 = tmp0_;
   uint64_t o112 = tmp10 + c50;
   uint64_t o122 = tmp20;
   uint64_t o132 = tmp30;
   uint64_t o142 = tmp40;
-  FStar_UInt128_uint128
-  l_5 = FStar_UInt128_add(tmp_w20, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, (uint32_t)51U));
+  FStar_UInt128_uint128 l_5 = FStar_UInt128_add(tmp_w20, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, 51U));
   FStar_UInt128_uint128 l_6 = FStar_UInt128_add(tmp_w21, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, 51U));
   FStar_UInt128_uint128 l_7 = FStar_UInt128_add(tmp_w22, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, 51U));
   FStar_UInt128_uint128 l_8 = FStar_UInt128_add(tmp_w23, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, 51U));
   FStar_UInt128_uint128 l_9 = FStar_UInt128_add(tmp_w24, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, (uint32_t)51U));
-  uint64_t l_10 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_0 = l_10 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_10 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, 51U));
+  uint64_t l_10 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_0 = l_10 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_10 >> 51U;
   uint64_t o200 = tmp0_0;
   uint64_t o212 = tmp1 + c5;
   uint64_t o222 = tmp2;
@@ -339,25 +336,24 @@ static inline void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f
   FStar_UInt128_uint128 tmp_w2 = FStar_UInt128_mul_wide(f2, f12);
   FStar_UInt128_uint128 tmp_w3 = FStar_UInt128_mul_wide(f2, f13);
   FStar_UInt128_uint128 tmp_w4 = FStar_UInt128_mul_wide(f2, f14);
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w1, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w2, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w3, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w4, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t o0 = tmp0_;
   uint64_t o1 = tmp1 + c5;
   uint64_t o2 = tmp2;
@@ -373,18 +369,18 @@ static inline void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f
 static inline void
 Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___)
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f0 = f[0U];
   uint64_t f1 = f[1U];
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  uint64_t d0 = (uint64_t)2U * f0;
-  uint64_t d1 = (uint64_t)2U * f1;
-  uint64_t d2 = (uint64_t)38U * f2;
-  uint64_t d3 = (uint64_t)19U * f3;
-  uint64_t d419 = (uint64_t)19U * f4;
-  uint64_t d4 = (uint64_t)2U * d419;
+  uint64_t d0 = 2ULL * f0;
+  uint64_t d1 = 2ULL * f1;
+  uint64_t d2 = 38ULL * f2;
+  uint64_t d3 = 19ULL * f3;
+  uint64_t d419 = 19ULL * f4;
+  uint64_t d4 = 2ULL * d419;
   FStar_UInt128_uint128
   s0 =
     FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f0, f0),
@@ -415,25 +411,24 @@ Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint
   FStar_UInt128_uint128 o20 = s2;
   FStar_UInt128_uint128 o30 = s3;
   FStar_UInt128_uint128 o40 = s4;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(o00, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(o00, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(o10, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(o20, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(o30, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(o40, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t o0 = tmp0_;
   uint64_t o1 = tmp1 + c5;
   uint64_t o2 = tmp2;
@@ -449,7 +444,7 @@ Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint
 static inline void
 Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___)
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f10 = f[0U];
   uint64_t f11 = f[1U];
   uint64_t f12 = f[2U];
@@ -460,12 +455,12 @@ Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uin
   uint64_t f22 = f[7U];
   uint64_t f23 = f[8U];
   uint64_t f24 = f[9U];
-  uint64_t d00 = (uint64_t)2U * f10;
-  uint64_t d10 = (uint64_t)2U * f11;
-  uint64_t d20 = (uint64_t)38U * f12;
-  uint64_t d30 = (uint64_t)19U * f13;
-  uint64_t d4190 = (uint64_t)19U * f14;
-  uint64_t d40 = (uint64_t)2U * d4190;
+  uint64_t d00 = 2ULL * f10;
+  uint64_t d10 = 2ULL * f11;
+  uint64_t d20 = 38ULL * f12;
+  uint64_t d30 = 19ULL * f13;
+  uint64_t d4190 = 19ULL * f14;
+  uint64_t d40 = 2ULL * d4190;
   FStar_UInt128_uint128
   s00 =
     FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f10, f10),
@@ -496,12 +491,12 @@ Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uin
   FStar_UInt128_uint128 o120 = s20;
   FStar_UInt128_uint128 o130 = s30;
   FStar_UInt128_uint128 o140 = s40;
-  uint64_t d0 = (uint64_t)2U * f20;
-  uint64_t d1 = (uint64_t)2U * f21;
-  uint64_t d2 = (uint64_t)38U * f22;
-  uint64_t d3 = (uint64_t)19U * f23;
-  uint64_t d419 = (uint64_t)19U * f24;
-  uint64_t d4 = (uint64_t)2U * d419;
+  uint64_t d0 = 2ULL * f20;
+  uint64_t d1 = 2ULL * f21;
+  uint64_t d2 = 38ULL * f22;
+  uint64_t d3 = 19ULL * f23;
+  uint64_t d419 = 19ULL * f24;
+  uint64_t d4 = 2ULL * d419;
   FStar_UInt128_uint128
   s0 =
     FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f20, f20),
@@ -532,49 +527,47 @@ Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uin
   FStar_UInt128_uint128 o220 = s2;
   FStar_UInt128_uint128 o230 = s3;
   FStar_UInt128_uint128 o240 = s4;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(o100, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(o100, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(o110, FStar_UInt128_uint64_to_uint128(c00));
-  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(o120, FStar_UInt128_uint64_to_uint128(c10));
-  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(o130, FStar_UInt128_uint64_to_uint128(c20));
-  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(o140, FStar_UInt128_uint64_to_uint128(c30));
-  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp00 + c40 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c50 = l_4 >> (uint32_t)51U;
+  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp00 + c40 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c50 = l_4 >> 51U;
   uint64_t o101 = tmp0_;
   uint64_t o111 = tmp10 + c50;
   uint64_t o121 = tmp20;
   uint64_t o131 = tmp30;
   uint64_t o141 = tmp40;
-  FStar_UInt128_uint128
-  l_5 = FStar_UInt128_add(o200, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, (uint32_t)51U));
+  FStar_UInt128_uint128 l_5 = FStar_UInt128_add(o200, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, 51U));
   FStar_UInt128_uint128 l_6 = FStar_UInt128_add(o210, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, 51U));
   FStar_UInt128_uint128 l_7 = FStar_UInt128_add(o220, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, 51U));
   FStar_UInt128_uint128 l_8 = FStar_UInt128_add(o230, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, 51U));
   FStar_UInt128_uint128 l_9 = FStar_UInt128_add(o240, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, (uint32_t)51U));
-  uint64_t l_10 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_0 = l_10 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_10 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, 51U));
+  uint64_t l_10 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_0 = l_10 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_10 >> 51U;
   uint64_t o201 = tmp0_0;
   uint64_t o211 = tmp1 + c5;
   uint64_t o221 = tmp2;
@@ -609,49 +602,49 @@ static inline void Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  uint64_t l_ = f0 + (uint64_t)0U;
-  uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = l_ >> (uint32_t)51U;
+  uint64_t l_ = f0 + 0ULL;
+  uint64_t tmp0 = l_ & 0x7ffffffffffffULL;
+  uint64_t c0 = l_ >> 51U;
   uint64_t l_0 = f1 + c0;
-  uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = l_0 >> (uint32_t)51U;
+  uint64_t tmp1 = l_0 & 0x7ffffffffffffULL;
+  uint64_t c1 = l_0 >> 51U;
   uint64_t l_1 = f2 + c1;
-  uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = l_1 >> (uint32_t)51U;
+  uint64_t tmp2 = l_1 & 0x7ffffffffffffULL;
+  uint64_t c2 = l_1 >> 51U;
   uint64_t l_2 = f3 + c2;
-  uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = l_2 >> (uint32_t)51U;
+  uint64_t tmp3 = l_2 & 0x7ffffffffffffULL;
+  uint64_t c3 = l_2 >> 51U;
   uint64_t l_3 = f4 + c3;
-  uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = l_3 >> (uint32_t)51U;
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = l_3 & 0x7ffffffffffffULL;
+  uint64_t c4 = l_3 >> 51U;
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t f01 = tmp0_;
   uint64_t f11 = tmp1 + c5;
   uint64_t f21 = tmp2;
   uint64_t f31 = tmp3;
   uint64_t f41 = tmp4;
-  uint64_t m0 = FStar_UInt64_gte_mask(f01, (uint64_t)0x7ffffffffffedU);
-  uint64_t m1 = FStar_UInt64_eq_mask(f11, (uint64_t)0x7ffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(f21, (uint64_t)0x7ffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(f31, (uint64_t)0x7ffffffffffffU);
-  uint64_t m4 = FStar_UInt64_eq_mask(f41, (uint64_t)0x7ffffffffffffU);
+  uint64_t m0 = FStar_UInt64_gte_mask(f01, 0x7ffffffffffedULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(f11, 0x7ffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(f21, 0x7ffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(f31, 0x7ffffffffffffULL);
+  uint64_t m4 = FStar_UInt64_eq_mask(f41, 0x7ffffffffffffULL);
   uint64_t mask = (((m0 & m1) & m2) & m3) & m4;
-  uint64_t f0_ = f01 - (mask & (uint64_t)0x7ffffffffffedU);
-  uint64_t f1_ = f11 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f2_ = f21 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f3_ = f31 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f4_ = f41 - (mask & (uint64_t)0x7ffffffffffffU);
+  uint64_t f0_ = f01 - (mask & 0x7ffffffffffedULL);
+  uint64_t f1_ = f11 - (mask & 0x7ffffffffffffULL);
+  uint64_t f2_ = f21 - (mask & 0x7ffffffffffffULL);
+  uint64_t f3_ = f31 - (mask & 0x7ffffffffffffULL);
+  uint64_t f4_ = f41 - (mask & 0x7ffffffffffffULL);
   uint64_t f02 = f0_;
   uint64_t f12 = f1_;
   uint64_t f22 = f2_;
   uint64_t f32 = f3_;
   uint64_t f42 = f4_;
-  uint64_t o00 = f02 | f12 << (uint32_t)51U;
-  uint64_t o10 = f12 >> (uint32_t)13U | f22 << (uint32_t)38U;
-  uint64_t o20 = f22 >> (uint32_t)26U | f32 << (uint32_t)25U;
-  uint64_t o30 = f32 >> (uint32_t)39U | f42 << (uint32_t)12U;
+  uint64_t o00 = f02 | f12 << 51U;
+  uint64_t o10 = f12 >> 13U | f22 << 38U;
+  uint64_t o20 = f22 >> 26U | f32 << 25U;
+  uint64_t o30 = f32 >> 39U | f42 << 12U;
   uint64_t o0 = o00;
   uint64_t o1 = o10;
   uint64_t o2 = o20;
@@ -665,11 +658,11 @@ static inline void Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint
 static inline void
 Hacl_Impl_Curve25519_Field51_cswap2(uint64_t bit, uint64_t *p1, uint64_t *p2)
 {
-  uint64_t mask = (uint64_t)0U - bit;
+  uint64_t mask = 0ULL - bit;
   KRML_MAYBE_FOR10(i,
-    (uint32_t)0U,
-    (uint32_t)10U,
-    (uint32_t)1U,
+    0U,
+    10U,
+    1U,
     uint64_t dummy = mask & (p1[i] ^ p2[i]);
     p1[i] = p1[i] ^ dummy;
     p2[i] = p2[i] ^ dummy;);
diff --git a/include/internal/Hacl_Bignum_Base.h b/include/internal/Hacl_Bignum_Base.h
index 2cfb0066..f2e282f4 100644
--- a/include/internal/Hacl_Bignum_Base.h
+++ b/include/internal/Hacl_Bignum_Base.h
@@ -45,7 +45,7 @@ Hacl_Bignum_Base_mul_wide_add2_u32(uint32_t a, uint32_t b, uint32_t c_in, uint32
   uint32_t out0 = out[0U];
   uint64_t res = (uint64_t)a * (uint64_t)b + (uint64_t)c_in + (uint64_t)out0;
   out[0U] = (uint32_t)res;
-  return (uint32_t)(res >> (uint32_t)32U);
+  return (uint32_t)(res >> 32U);
 }
 
 static inline uint64_t
@@ -58,22 +58,22 @@ Hacl_Bignum_Base_mul_wide_add2_u64(uint64_t a, uint64_t b, uint64_t c_in, uint64
         FStar_UInt128_uint64_to_uint128(c_in)),
       FStar_UInt128_uint64_to_uint128(out0));
   out[0U] = FStar_UInt128_uint128_to_uint64(res);
-  return FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
+  return FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U));
 }
 
 static inline void
 Hacl_Bignum_Convert_bn_from_bytes_be_uint64(uint32_t len, uint8_t *b, uint64_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -82,24 +82,24 @@ Hacl_Bignum_Convert_bn_from_bytes_be_uint64(uint32_t len, uint8_t *b, uint64_t *
 static inline void
 Hacl_Bignum_Convert_bn_to_bytes_be_uint64(uint32_t len, uint64_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store64_be(tmp + i * (uint32_t)8U, b[bnLen - i - (uint32_t)1U]);
+    store64_be(tmp + i * 8U, b[bnLen - i - 1U]);
   }
   memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t));
 }
 
 static inline uint32_t Hacl_Bignum_Lib_bn_get_top_index_u32(uint32_t len, uint32_t *b)
 {
-  uint32_t priv = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t priv = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
-    uint32_t mask = FStar_UInt32_eq_mask(b[i], (uint32_t)0U);
+    uint32_t mask = FStar_UInt32_eq_mask(b[i], 0U);
     priv = (mask & priv) | (~mask & i);
   }
   return priv;
@@ -107,10 +107,10 @@ static inline uint32_t Hacl_Bignum_Lib_bn_get_top_index_u32(uint32_t len, uint32
 
 static inline uint64_t Hacl_Bignum_Lib_bn_get_top_index_u64(uint32_t len, uint64_t *b)
 {
-  uint64_t priv = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t priv = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
-    uint64_t mask = FStar_UInt64_eq_mask(b[i], (uint64_t)0U);
+    uint64_t mask = FStar_UInt64_eq_mask(b[i], 0ULL);
     priv = (mask & priv) | (~mask & (uint64_t)i);
   }
   return priv;
@@ -119,63 +119,63 @@ static inline uint64_t Hacl_Bignum_Lib_bn_get_top_index_u64(uint32_t len, uint64
 static inline uint32_t
 Hacl_Bignum_Lib_bn_get_bits_u32(uint32_t len, uint32_t *b, uint32_t i, uint32_t l)
 {
-  uint32_t i1 = i / (uint32_t)32U;
-  uint32_t j = i % (uint32_t)32U;
+  uint32_t i1 = i / 32U;
+  uint32_t j = i % 32U;
   uint32_t p1 = b[i1] >> j;
   uint32_t ite;
-  if (i1 + (uint32_t)1U < len && (uint32_t)0U < j)
+  if (i1 + 1U < len && 0U < j)
   {
-    ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)32U - j);
+    ite = p1 | b[i1 + 1U] << (32U - j);
   }
   else
   {
     ite = p1;
   }
-  return ite & (((uint32_t)1U << l) - (uint32_t)1U);
+  return ite & ((1U << l) - 1U);
 }
 
 static inline uint64_t
 Hacl_Bignum_Lib_bn_get_bits_u64(uint32_t len, uint64_t *b, uint32_t i, uint32_t l)
 {
-  uint32_t i1 = i / (uint32_t)64U;
-  uint32_t j = i % (uint32_t)64U;
+  uint32_t i1 = i / 64U;
+  uint32_t j = i % 64U;
   uint64_t p1 = b[i1] >> j;
   uint64_t ite;
-  if (i1 + (uint32_t)1U < len && (uint32_t)0U < j)
+  if (i1 + 1U < len && 0U < j)
   {
-    ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j);
+    ite = p1 | b[i1 + 1U] << (64U - j);
   }
   else
   {
     ite = p1;
   }
-  return ite & (((uint64_t)1U << l) - (uint64_t)1U);
+  return ite & ((1ULL << l) - 1ULL);
 }
 
 static inline uint32_t
 Hacl_Bignum_Addition_bn_sub_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -188,27 +188,27 @@ Hacl_Bignum_Addition_bn_sub_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b,
 static inline uint64_t
 Hacl_Bignum_Addition_bn_sub_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -221,27 +221,27 @@ Hacl_Bignum_Addition_bn_sub_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b,
 static inline uint32_t
 Hacl_Bignum_Addition_bn_add_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -254,27 +254,27 @@ Hacl_Bignum_Addition_bn_add_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b,
 static inline uint64_t
 Hacl_Bignum_Addition_bn_add_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -294,27 +294,27 @@ Hacl_Bignum_Multiplication_bn_mul_u32(
 )
 {
   memset(res, 0U, (aLen + bLen) * sizeof (uint32_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bLen; i0++)
+  for (uint32_t i0 = 0U; i0 < bLen; i0++)
   {
     uint32_t bj = b[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < aLen / 4U; i++)
     {
-      uint32_t a_i = a[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = a[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i0);
-      uint32_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = a[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c, res_i1);
-      uint32_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = a[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c, res_i2);
-      uint32_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = a[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c, res_i);
     }
-    for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+    for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
     {
       uint32_t a_i = a[i];
       uint32_t *res_i = res_j + i;
@@ -335,27 +335,27 @@ Hacl_Bignum_Multiplication_bn_mul_u64(
 )
 {
   memset(res, 0U, (aLen + bLen) * sizeof (uint64_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bLen; i0++)
+  for (uint32_t i0 = 0U; i0 < bLen; i0++)
   {
     uint64_t bj = b[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < aLen / 4U; i++)
     {
-      uint64_t a_i = a[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = a[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
-    for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+    for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
     {
       uint64_t a_i = a[i];
       uint64_t *res_i = res_j + i;
@@ -370,28 +370,28 @@ static inline void
 Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res)
 {
   memset(res, 0U, (aLen + aLen) * sizeof (uint32_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < aLen; i0++)
+  for (uint32_t i0 = 0U; i0 < aLen; i0++)
   {
     uint32_t *ab = a;
     uint32_t a_j = a[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint32_t a_i = ab[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = ab[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i0);
-      uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = ab[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c, res_i1);
-      uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = ab[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c, res_i2);
-      uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = ab[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint32_t a_i = ab[i];
       uint32_t *res_i = res_j + i;
@@ -401,48 +401,48 @@ Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res)
     res[i0 + i0] = r;
   }
   uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  KRML_MAYBE_UNUSED_VAR(c0);
   KRML_CHECK_SIZE(sizeof (uint32_t), aLen + aLen);
   uint32_t tmp[aLen + aLen];
   memset(tmp, 0U, (aLen + aLen) * sizeof (uint32_t));
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     uint64_t res1 = (uint64_t)a[i] * (uint64_t)a[i];
-    uint32_t hi = (uint32_t)(res1 >> (uint32_t)32U);
+    uint32_t hi = (uint32_t)(res1 >> 32U);
     uint32_t lo = (uint32_t)res1;
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;
   }
   uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void
 Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res)
 {
   memset(res, 0U, (aLen + aLen) * sizeof (uint64_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < aLen; i0++)
+  for (uint32_t i0 = 0U; i0 < aLen; i0++)
   {
     uint64_t *ab = a;
     uint64_t a_j = a[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -452,20 +452,20 @@ Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res)
     res[i0 + i0] = r;
   }
   uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  KRML_MAYBE_UNUSED_VAR(c0);
   KRML_CHECK_SIZE(sizeof (uint64_t), aLen + aLen);
   uint64_t tmp[aLen + aLen];
   memset(tmp, 0U, (aLen + aLen) * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;
   }
   uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_Bignum_K256.h b/include/internal/Hacl_Bignum_K256.h
index 59aff176..fe72fffe 100644
--- a/include/internal/Hacl_Bignum_K256.h
+++ b/include/internal/Hacl_Bignum_K256.h
@@ -45,13 +45,7 @@ static inline bool Hacl_K256_Field_is_felem_zero_vartime(uint64_t *f)
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  return
-    f0
-    == (uint64_t)0U
-    && f1 == (uint64_t)0U
-    && f2 == (uint64_t)0U
-    && f3 == (uint64_t)0U
-    && f4 == (uint64_t)0U;
+  return f0 == 0ULL && f1 == 0ULL && f2 == 0ULL && f3 == 0ULL && f4 == 0ULL;
 }
 
 static inline bool Hacl_K256_Field_is_felem_eq_vartime(uint64_t *f1, uint64_t *f2)
@@ -76,42 +70,42 @@ static inline bool Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime(uint64_
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  if (f4 > (uint64_t)0U)
+  if (f4 > 0ULL)
   {
     return false;
   }
-  if (f3 > (uint64_t)0U)
+  if (f3 > 0ULL)
   {
     return false;
   }
-  if (f2 < (uint64_t)0x1455123U)
+  if (f2 < 0x1455123ULL)
   {
     return true;
   }
-  if (f2 > (uint64_t)0x1455123U)
+  if (f2 > 0x1455123ULL)
   {
     return false;
   }
-  if (f1 < (uint64_t)0x1950b75fc4402U)
+  if (f1 < 0x1950b75fc4402ULL)
   {
     return true;
   }
-  if (f1 > (uint64_t)0x1950b75fc4402U)
+  if (f1 > 0x1950b75fc4402ULL)
   {
     return false;
   }
-  return f0 < (uint64_t)0xda1722fc9baeeU;
+  return f0 < 0xda1722fc9baeeULL;
 }
 
 static inline void Hacl_K256_Field_load_felem(uint64_t *f, uint8_t *b)
 {
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = tmp;
-    uint8_t *bj = b + i * (uint32_t)8U;
+    uint8_t *bj = b + i * 8U;
     uint64_t u = load64_be(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -120,11 +114,11 @@ static inline void Hacl_K256_Field_load_felem(uint64_t *f, uint8_t *b)
   uint64_t s1 = tmp[2U];
   uint64_t s2 = tmp[1U];
   uint64_t s3 = tmp[0U];
-  uint64_t f00 = s0 & (uint64_t)0xfffffffffffffU;
-  uint64_t f10 = s0 >> (uint32_t)52U | (s1 & (uint64_t)0xffffffffffU) << (uint32_t)12U;
-  uint64_t f20 = s1 >> (uint32_t)40U | (s2 & (uint64_t)0xfffffffU) << (uint32_t)24U;
-  uint64_t f30 = s2 >> (uint32_t)28U | (s3 & (uint64_t)0xffffU) << (uint32_t)36U;
-  uint64_t f40 = s3 >> (uint32_t)16U;
+  uint64_t f00 = s0 & 0xfffffffffffffULL;
+  uint64_t f10 = s0 >> 52U | (s1 & 0xffffffffffULL) << 12U;
+  uint64_t f20 = s1 >> 40U | (s2 & 0xfffffffULL) << 24U;
+  uint64_t f30 = s2 >> 28U | (s3 & 0xffffULL) << 36U;
+  uint64_t f40 = s3 >> 16U;
   uint64_t f0 = f00;
   uint64_t f1 = f10;
   uint64_t f2 = f20;
@@ -148,11 +142,11 @@ static inline bool Hacl_K256_Field_load_felem_lt_prime_vartime(uint64_t *f, uint
   bool
   is_ge_p =
     f0
-    >= (uint64_t)0xffffefffffc2fU
-    && f1 == (uint64_t)0xfffffffffffffU
-    && f2 == (uint64_t)0xfffffffffffffU
-    && f3 == (uint64_t)0xfffffffffffffU
-    && f4 == (uint64_t)0xffffffffffffU;
+    >= 0xffffefffffc2fULL
+    && f1 == 0xfffffffffffffULL
+    && f2 == 0xfffffffffffffULL
+    && f3 == 0xfffffffffffffULL
+    && f4 == 0xffffffffffffULL;
   return !is_ge_p;
 }
 
@@ -164,10 +158,10 @@ static inline void Hacl_K256_Field_store_felem(uint8_t *b, uint64_t *f)
   uint64_t f20 = f[2U];
   uint64_t f30 = f[3U];
   uint64_t f4 = f[4U];
-  uint64_t o0 = f00 | f10 << (uint32_t)52U;
-  uint64_t o1 = f10 >> (uint32_t)12U | f20 << (uint32_t)40U;
-  uint64_t o2 = f20 >> (uint32_t)24U | f30 << (uint32_t)28U;
-  uint64_t o3 = f30 >> (uint32_t)36U | f4 << (uint32_t)16U;
+  uint64_t o0 = f00 | f10 << 52U;
+  uint64_t o1 = f10 >> 12U | f20 << 40U;
+  uint64_t o2 = f20 >> 24U | f30 << 28U;
+  uint64_t o3 = f30 >> 36U | f4 << 16U;
   uint64_t f0 = o0;
   uint64_t f1 = o1;
   uint64_t f2 = o2;
@@ -176,11 +170,7 @@ static inline void Hacl_K256_Field_store_felem(uint8_t *b, uint64_t *f)
   tmp[1U] = f2;
   tmp[2U] = f1;
   tmp[3U] = f0;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(b + i * (uint32_t)8U, tmp[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(b + i * 8U, tmp[i]););
 }
 
 static inline void Hacl_K256_Field_fmul_small_num(uint64_t *out, uint64_t *f, uint64_t num)
@@ -248,11 +238,11 @@ static inline void Hacl_K256_Field_fsub(uint64_t *out, uint64_t *f1, uint64_t *f
   uint64_t b2 = f2[2U];
   uint64_t b3 = f2[3U];
   uint64_t b4 = f2[4U];
-  uint64_t r00 = (uint64_t)9007190664804446U * x - b0;
-  uint64_t r10 = (uint64_t)9007199254740990U * x - b1;
-  uint64_t r20 = (uint64_t)9007199254740990U * x - b2;
-  uint64_t r30 = (uint64_t)9007199254740990U * x - b3;
-  uint64_t r40 = (uint64_t)562949953421310U * x - b4;
+  uint64_t r00 = 9007190664804446ULL * x - b0;
+  uint64_t r10 = 9007199254740990ULL * x - b1;
+  uint64_t r20 = 9007199254740990ULL * x - b2;
+  uint64_t r30 = 9007199254740990ULL * x - b3;
+  uint64_t r40 = 562949953421310ULL * x - b4;
   uint64_t r0 = r00;
   uint64_t r1 = r10;
   uint64_t r2 = r20;
@@ -287,7 +277,7 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
   uint64_t b2 = f2[2U];
   uint64_t b3 = f2[3U];
   uint64_t b4 = f2[4U];
-  uint64_t r = (uint64_t)0x1000003D10U;
+  uint64_t r = 0x1000003D10ULL;
   FStar_UInt128_uint128
   d0 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0,
@@ -298,9 +288,9 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
   FStar_UInt128_uint128 c0 = FStar_UInt128_mul_wide(a4, b4);
   FStar_UInt128_uint128
   d1 = FStar_UInt128_add_mod(d0, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(c0)));
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, (uint32_t)64U));
-  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, (uint32_t)52U);
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, 64U));
+  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, 52U);
   FStar_UInt128_uint128
   d3 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(d2,
@@ -309,12 +299,11 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
           FStar_UInt128_mul_wide(a2, b2)),
         FStar_UInt128_mul_wide(a3, b1)),
       FStar_UInt128_mul_wide(a4, b0));
-  FStar_UInt128_uint128
-  d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << (uint32_t)12U, c1));
-  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, (uint32_t)52U);
-  uint64_t tx = t4 >> (uint32_t)48U;
-  uint64_t t4_ = t4 & (uint64_t)0xffffffffffffU;
+  FStar_UInt128_uint128 d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << 12U, c1));
+  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, 52U);
+  uint64_t tx = t4 >> 48U;
+  uint64_t t4_ = t4 & 0xffffffffffffULL;
   FStar_UInt128_uint128 c2 = FStar_UInt128_mul_wide(a0, b0);
   FStar_UInt128_uint128
   d6 =
@@ -323,13 +312,12 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
           FStar_UInt128_mul_wide(a2, b3)),
         FStar_UInt128_mul_wide(a3, b2)),
       FStar_UInt128_mul_wide(a4, b1));
-  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, (uint32_t)52U);
-  uint64_t u0_ = tx | u0 << (uint32_t)4U;
-  FStar_UInt128_uint128
-  c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> (uint32_t)4U));
-  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, (uint32_t)52U);
+  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, 52U);
+  uint64_t u0_ = tx | u0 << 4U;
+  FStar_UInt128_uint128 c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> 4U));
+  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, 52U);
   FStar_UInt128_uint128
   c5 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(c4, FStar_UInt128_mul_wide(a0, b1)),
@@ -343,10 +331,10 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
   FStar_UInt128_uint128
   c6 =
     FStar_UInt128_add_mod(c5,
-      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & (uint64_t)0xfffffffffffffU, r));
-  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, (uint32_t)52U);
-  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, (uint32_t)52U);
+      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & 0xfffffffffffffULL, r));
+  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, 52U);
+  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, 52U);
   FStar_UInt128_uint128
   c8 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(c7,
@@ -359,16 +347,15 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
       FStar_UInt128_mul_wide(a4, b3));
   FStar_UInt128_uint128
   c9 = FStar_UInt128_add_mod(c8, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(d10)));
-  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, (uint32_t)64U));
-  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, (uint32_t)52U);
+  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, 64U));
+  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, 52U);
   FStar_UInt128_uint128
   c11 =
-    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10,
-        FStar_UInt128_mul_wide(r << (uint32_t)12U, d11)),
+    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10, FStar_UInt128_mul_wide(r << 12U, d11)),
       FStar_UInt128_uint64_to_uint128(t3));
-  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & (uint64_t)0xfffffffffffffU;
-  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, (uint32_t)52U));
+  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & 0xfffffffffffffULL;
+  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, 52U));
   uint64_t r4 = c12 + t4_;
   uint64_t f0 = r0;
   uint64_t f11 = r1;
@@ -389,43 +376,41 @@ static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f)
   uint64_t a2 = f[2U];
   uint64_t a3 = f[3U];
   uint64_t a4 = f[4U];
-  uint64_t r = (uint64_t)0x1000003D10U;
+  uint64_t r = 0x1000003D10ULL;
   FStar_UInt128_uint128
   d0 =
-    FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0 * (uint64_t)2U, a3),
-      FStar_UInt128_mul_wide(a1 * (uint64_t)2U, a2));
+    FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0 * 2ULL, a3),
+      FStar_UInt128_mul_wide(a1 * 2ULL, a2));
   FStar_UInt128_uint128 c0 = FStar_UInt128_mul_wide(a4, a4);
   FStar_UInt128_uint128
   d1 = FStar_UInt128_add_mod(d0, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(c0)));
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, (uint32_t)64U));
-  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, (uint32_t)52U);
-  uint64_t a41 = a4 * (uint64_t)2U;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, 64U));
+  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, 52U);
+  uint64_t a41 = a4 * 2ULL;
   FStar_UInt128_uint128
   d3 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(d2,
           FStar_UInt128_mul_wide(a0, a41)),
-        FStar_UInt128_mul_wide(a1 * (uint64_t)2U, a3)),
+        FStar_UInt128_mul_wide(a1 * 2ULL, a3)),
       FStar_UInt128_mul_wide(a2, a2));
-  FStar_UInt128_uint128
-  d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << (uint32_t)12U, c1));
-  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, (uint32_t)52U);
-  uint64_t tx = t4 >> (uint32_t)48U;
-  uint64_t t4_ = t4 & (uint64_t)0xffffffffffffU;
+  FStar_UInt128_uint128 d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << 12U, c1));
+  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, 52U);
+  uint64_t tx = t4 >> 48U;
+  uint64_t t4_ = t4 & 0xffffffffffffULL;
   FStar_UInt128_uint128 c2 = FStar_UInt128_mul_wide(a0, a0);
   FStar_UInt128_uint128
   d6 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(d5, FStar_UInt128_mul_wide(a1, a41)),
-      FStar_UInt128_mul_wide(a2 * (uint64_t)2U, a3));
-  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, (uint32_t)52U);
-  uint64_t u0_ = tx | u0 << (uint32_t)4U;
-  FStar_UInt128_uint128
-  c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> (uint32_t)4U));
-  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, (uint32_t)52U);
-  uint64_t a01 = a0 * (uint64_t)2U;
+      FStar_UInt128_mul_wide(a2 * 2ULL, a3));
+  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, 52U);
+  uint64_t u0_ = tx | u0 << 4U;
+  FStar_UInt128_uint128 c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> 4U));
+  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, 52U);
+  uint64_t a01 = a0 * 2ULL;
   FStar_UInt128_uint128 c5 = FStar_UInt128_add_mod(c4, FStar_UInt128_mul_wide(a01, a1));
   FStar_UInt128_uint128
   d8 =
@@ -434,10 +419,10 @@ static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f)
   FStar_UInt128_uint128
   c6 =
     FStar_UInt128_add_mod(c5,
-      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & (uint64_t)0xfffffffffffffU, r));
-  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, (uint32_t)52U);
-  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, (uint32_t)52U);
+      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & 0xfffffffffffffULL, r));
+  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, 52U);
+  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, 52U);
   FStar_UInt128_uint128
   c8 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(c7, FStar_UInt128_mul_wide(a01, a2)),
@@ -445,16 +430,15 @@ static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f)
   FStar_UInt128_uint128 d10 = FStar_UInt128_add_mod(d9, FStar_UInt128_mul_wide(a3, a41));
   FStar_UInt128_uint128
   c9 = FStar_UInt128_add_mod(c8, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(d10)));
-  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, (uint32_t)64U));
-  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, (uint32_t)52U);
+  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, 64U));
+  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, 52U);
   FStar_UInt128_uint128
   c11 =
-    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10,
-        FStar_UInt128_mul_wide(r << (uint32_t)12U, d11)),
+    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10, FStar_UInt128_mul_wide(r << 12U, d11)),
       FStar_UInt128_uint64_to_uint128(t3));
-  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & (uint64_t)0xfffffffffffffU;
-  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, (uint32_t)52U));
+  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & 0xfffffffffffffULL;
+  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, 52U));
   uint64_t r4 = c12 + t4_;
   uint64_t f0 = r0;
   uint64_t f1 = r1;
@@ -475,23 +459,23 @@ static inline void Hacl_K256_Field_fnormalize_weak(uint64_t *out, uint64_t *f)
   uint64_t t2 = f[2U];
   uint64_t t3 = f[3U];
   uint64_t t4 = f[4U];
-  uint64_t x0 = t4 >> (uint32_t)48U;
-  uint64_t t410 = t4 & (uint64_t)0xffffffffffffU;
+  uint64_t x0 = t4 >> 48U;
+  uint64_t t410 = t4 & 0xffffffffffffULL;
   uint64_t x = x0;
   uint64_t t01 = t0;
   uint64_t t11 = t1;
   uint64_t t21 = t2;
   uint64_t t31 = t3;
   uint64_t t41 = t410;
-  uint64_t t02 = t01 + x * (uint64_t)0x1000003D1U;
-  uint64_t t12 = t11 + (t02 >> (uint32_t)52U);
-  uint64_t t03 = t02 & (uint64_t)0xfffffffffffffU;
-  uint64_t t22 = t21 + (t12 >> (uint32_t)52U);
-  uint64_t t13 = t12 & (uint64_t)0xfffffffffffffU;
-  uint64_t t32 = t31 + (t22 >> (uint32_t)52U);
-  uint64_t t23 = t22 & (uint64_t)0xfffffffffffffU;
-  uint64_t t42 = t41 + (t32 >> (uint32_t)52U);
-  uint64_t t33 = t32 & (uint64_t)0xfffffffffffffU;
+  uint64_t t02 = t01 + x * 0x1000003D1ULL;
+  uint64_t t12 = t11 + (t02 >> 52U);
+  uint64_t t03 = t02 & 0xfffffffffffffULL;
+  uint64_t t22 = t21 + (t12 >> 52U);
+  uint64_t t13 = t12 & 0xfffffffffffffULL;
+  uint64_t t32 = t31 + (t22 >> 52U);
+  uint64_t t23 = t22 & 0xfffffffffffffULL;
+  uint64_t t42 = t41 + (t32 >> 52U);
+  uint64_t t33 = t32 & 0xfffffffffffffULL;
   uint64_t f0 = t03;
   uint64_t f1 = t13;
   uint64_t f2 = t23;
@@ -511,59 +495,59 @@ static inline void Hacl_K256_Field_fnormalize(uint64_t *out, uint64_t *f)
   uint64_t f20 = f[2U];
   uint64_t f30 = f[3U];
   uint64_t f40 = f[4U];
-  uint64_t x0 = f40 >> (uint32_t)48U;
-  uint64_t t40 = f40 & (uint64_t)0xffffffffffffU;
+  uint64_t x0 = f40 >> 48U;
+  uint64_t t40 = f40 & 0xffffffffffffULL;
   uint64_t x1 = x0;
   uint64_t t00 = f00;
   uint64_t t10 = f10;
   uint64_t t20 = f20;
   uint64_t t30 = f30;
   uint64_t t42 = t40;
-  uint64_t t01 = t00 + x1 * (uint64_t)0x1000003D1U;
-  uint64_t t110 = t10 + (t01 >> (uint32_t)52U);
-  uint64_t t020 = t01 & (uint64_t)0xfffffffffffffU;
-  uint64_t t210 = t20 + (t110 >> (uint32_t)52U);
-  uint64_t t120 = t110 & (uint64_t)0xfffffffffffffU;
-  uint64_t t310 = t30 + (t210 >> (uint32_t)52U);
-  uint64_t t220 = t210 & (uint64_t)0xfffffffffffffU;
-  uint64_t t410 = t42 + (t310 >> (uint32_t)52U);
-  uint64_t t320 = t310 & (uint64_t)0xfffffffffffffU;
+  uint64_t t01 = t00 + x1 * 0x1000003D1ULL;
+  uint64_t t110 = t10 + (t01 >> 52U);
+  uint64_t t020 = t01 & 0xfffffffffffffULL;
+  uint64_t t210 = t20 + (t110 >> 52U);
+  uint64_t t120 = t110 & 0xfffffffffffffULL;
+  uint64_t t310 = t30 + (t210 >> 52U);
+  uint64_t t220 = t210 & 0xfffffffffffffULL;
+  uint64_t t410 = t42 + (t310 >> 52U);
+  uint64_t t320 = t310 & 0xfffffffffffffULL;
   uint64_t t0 = t020;
   uint64_t t1 = t120;
   uint64_t t2 = t220;
   uint64_t t3 = t320;
   uint64_t t4 = t410;
-  uint64_t x2 = t4 >> (uint32_t)48U;
-  uint64_t t411 = t4 & (uint64_t)0xffffffffffffU;
+  uint64_t x2 = t4 >> 48U;
+  uint64_t t411 = t4 & 0xffffffffffffULL;
   uint64_t x = x2;
   uint64_t r0 = t0;
   uint64_t r1 = t1;
   uint64_t r2 = t2;
   uint64_t r3 = t3;
   uint64_t r4 = t411;
-  uint64_t m4 = FStar_UInt64_eq_mask(r4, (uint64_t)0xffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(r3, (uint64_t)0xfffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(r2, (uint64_t)0xfffffffffffffU);
-  uint64_t m1 = FStar_UInt64_eq_mask(r1, (uint64_t)0xfffffffffffffU);
-  uint64_t m0 = FStar_UInt64_gte_mask(r0, (uint64_t)0xffffefffffc2fU);
+  uint64_t m4 = FStar_UInt64_eq_mask(r4, 0xffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(r3, 0xfffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(r2, 0xfffffffffffffULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(r1, 0xfffffffffffffULL);
+  uint64_t m0 = FStar_UInt64_gte_mask(r0, 0xffffefffffc2fULL);
   uint64_t is_ge_p_m = (((m0 & m1) & m2) & m3) & m4;
-  uint64_t m_to_one = is_ge_p_m & (uint64_t)1U;
+  uint64_t m_to_one = is_ge_p_m & 1ULL;
   uint64_t x10 = m_to_one | x;
-  uint64_t t010 = r0 + x10 * (uint64_t)0x1000003D1U;
-  uint64_t t11 = r1 + (t010 >> (uint32_t)52U);
-  uint64_t t02 = t010 & (uint64_t)0xfffffffffffffU;
-  uint64_t t21 = r2 + (t11 >> (uint32_t)52U);
-  uint64_t t12 = t11 & (uint64_t)0xfffffffffffffU;
-  uint64_t t31 = r3 + (t21 >> (uint32_t)52U);
-  uint64_t t22 = t21 & (uint64_t)0xfffffffffffffU;
-  uint64_t t41 = r4 + (t31 >> (uint32_t)52U);
-  uint64_t t32 = t31 & (uint64_t)0xfffffffffffffU;
+  uint64_t t010 = r0 + x10 * 0x1000003D1ULL;
+  uint64_t t11 = r1 + (t010 >> 52U);
+  uint64_t t02 = t010 & 0xfffffffffffffULL;
+  uint64_t t21 = r2 + (t11 >> 52U);
+  uint64_t t12 = t11 & 0xfffffffffffffULL;
+  uint64_t t31 = r3 + (t21 >> 52U);
+  uint64_t t22 = t21 & 0xfffffffffffffULL;
+  uint64_t t41 = r4 + (t31 >> 52U);
+  uint64_t t32 = t31 & 0xfffffffffffffULL;
   uint64_t s0 = t02;
   uint64_t s1 = t12;
   uint64_t s2 = t22;
   uint64_t s3 = t32;
   uint64_t s4 = t41;
-  uint64_t t412 = s4 & (uint64_t)0xffffffffffffU;
+  uint64_t t412 = s4 & 0xffffffffffffULL;
   uint64_t k0 = s0;
   uint64_t k1 = s1;
   uint64_t k2 = s2;
@@ -590,11 +574,11 @@ static inline void Hacl_K256_Field_fnegate_conditional_vartime(uint64_t *f, bool
     uint64_t a2 = f[2U];
     uint64_t a3 = f[3U];
     uint64_t a4 = f[4U];
-    uint64_t r0 = (uint64_t)9007190664804446U - a0;
-    uint64_t r1 = (uint64_t)9007199254740990U - a1;
-    uint64_t r2 = (uint64_t)9007199254740990U - a2;
-    uint64_t r3 = (uint64_t)9007199254740990U - a3;
-    uint64_t r4 = (uint64_t)562949953421310U - a4;
+    uint64_t r0 = 9007190664804446ULL - a0;
+    uint64_t r1 = 9007199254740990ULL - a1;
+    uint64_t r2 = 9007199254740990ULL - a2;
+    uint64_t r3 = 9007199254740990ULL - a3;
+    uint64_t r4 = 562949953421310ULL - a4;
     uint64_t f0 = r0;
     uint64_t f1 = r1;
     uint64_t f2 = r2;
@@ -612,7 +596,7 @@ static inline void Hacl_K256_Field_fnegate_conditional_vartime(uint64_t *f, bool
 
 static inline void Hacl_Impl_K256_Finv_fsquare_times_in_place(uint64_t *out, uint32_t b)
 {
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  for (uint32_t i = 0U; i < b; i++)
   {
     Hacl_K256_Field_fsqr(out, out);
   }
@@ -620,8 +604,8 @@ static inline void Hacl_Impl_K256_Finv_fsquare_times_in_place(uint64_t *out, uin
 
 static inline void Hacl_Impl_K256_Finv_fsquare_times(uint64_t *out, uint64_t *a, uint32_t b)
 {
-  memcpy(out, a, (uint32_t)5U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  memcpy(out, a, 5U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < b; i++)
   {
     Hacl_K256_Field_fsqr(out, out);
   }
@@ -633,29 +617,29 @@ static inline void Hacl_Impl_K256_Finv_fexp_223_23(uint64_t *out, uint64_t *x2,
   uint64_t x22[5U] = { 0U };
   uint64_t x44[5U] = { 0U };
   uint64_t x88[5U] = { 0U };
-  Hacl_Impl_K256_Finv_fsquare_times(x2, f, (uint32_t)1U);
+  Hacl_Impl_K256_Finv_fsquare_times(x2, f, 1U);
   Hacl_K256_Field_fmul(x2, x2, f);
-  Hacl_Impl_K256_Finv_fsquare_times(x3, x2, (uint32_t)1U);
+  Hacl_Impl_K256_Finv_fsquare_times(x3, x2, 1U);
   Hacl_K256_Field_fmul(x3, x3, f);
-  Hacl_Impl_K256_Finv_fsquare_times(out, x3, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times(out, x3, 3U);
   Hacl_K256_Field_fmul(out, out, x3);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U);
   Hacl_K256_Field_fmul(out, out, x3);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U);
   Hacl_K256_Field_fmul(out, out, x2);
-  Hacl_Impl_K256_Finv_fsquare_times(x22, out, (uint32_t)11U);
+  Hacl_Impl_K256_Finv_fsquare_times(x22, out, 11U);
   Hacl_K256_Field_fmul(x22, x22, out);
-  Hacl_Impl_K256_Finv_fsquare_times(x44, x22, (uint32_t)22U);
+  Hacl_Impl_K256_Finv_fsquare_times(x44, x22, 22U);
   Hacl_K256_Field_fmul(x44, x44, x22);
-  Hacl_Impl_K256_Finv_fsquare_times(x88, x44, (uint32_t)44U);
+  Hacl_Impl_K256_Finv_fsquare_times(x88, x44, 44U);
   Hacl_K256_Field_fmul(x88, x88, x44);
-  Hacl_Impl_K256_Finv_fsquare_times(out, x88, (uint32_t)88U);
+  Hacl_Impl_K256_Finv_fsquare_times(out, x88, 88U);
   Hacl_K256_Field_fmul(out, out, x88);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)44U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 44U);
   Hacl_K256_Field_fmul(out, out, x44);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U);
   Hacl_K256_Field_fmul(out, out, x3);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)23U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 23U);
   Hacl_K256_Field_fmul(out, out, x22);
 }
 
@@ -663,11 +647,11 @@ static inline void Hacl_Impl_K256_Finv_finv(uint64_t *out, uint64_t *f)
 {
   uint64_t x2[5U] = { 0U };
   Hacl_Impl_K256_Finv_fexp_223_23(out, x2, f);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)5U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 5U);
   Hacl_K256_Field_fmul(out, out, f);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U);
   Hacl_K256_Field_fmul(out, out, x2);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U);
   Hacl_K256_Field_fmul(out, out, f);
 }
 
@@ -675,9 +659,9 @@ static inline void Hacl_Impl_K256_Finv_fsqrt(uint64_t *out, uint64_t *f)
 {
   uint64_t x2[5U] = { 0U };
   Hacl_Impl_K256_Finv_fexp_223_23(out, x2, f);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)6U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 6U);
   Hacl_K256_Field_fmul(out, out, x2);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U);
 }
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_Ed25519_PrecompTable.h b/include/internal/Hacl_Ed25519_PrecompTable.h
index 77d2244c..a20cd912 100644
--- a/include/internal/Hacl_Ed25519_PrecompTable.h
+++ b/include/internal/Hacl_Ed25519_PrecompTable.h
@@ -39,655 +39,491 @@ static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)1738742601995546U, (uint64_t)1146398526822698U,
-    (uint64_t)2070867633025821U, (uint64_t)562264141797630U, (uint64_t)587772402128613U,
-    (uint64_t)1801439850948184U, (uint64_t)1351079888211148U, (uint64_t)450359962737049U,
-    (uint64_t)900719925474099U, (uint64_t)1801439850948198U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1841354044333475U,
-    (uint64_t)16398895984059U, (uint64_t)755974180946558U, (uint64_t)900171276175154U,
-    (uint64_t)1821297809914039U, (uint64_t)1661154287933054U, (uint64_t)284530020860578U,
-    (uint64_t)1390261174866914U, (uint64_t)1524110943907984U, (uint64_t)1045603498418422U,
-    (uint64_t)928651508580478U, (uint64_t)1383326941296346U, (uint64_t)961937908925785U,
-    (uint64_t)80455759693706U, (uint64_t)904734540352947U, (uint64_t)1507481815385608U,
-    (uint64_t)2223447444246085U, (uint64_t)1083941587175919U, (uint64_t)2059929906842505U,
-    (uint64_t)1581435440146976U, (uint64_t)782730187692425U, (uint64_t)9928394897574U,
-    (uint64_t)1539449519985236U, (uint64_t)1923587931078510U, (uint64_t)552919286076056U,
-    (uint64_t)376925408065760U, (uint64_t)447320488831784U, (uint64_t)1362918338468019U,
-    (uint64_t)1470031896696846U, (uint64_t)2189796996539902U, (uint64_t)1337552949959847U,
-    (uint64_t)1762287177775726U, (uint64_t)237994495816815U, (uint64_t)1277840395970544U,
-    (uint64_t)543972849007241U, (uint64_t)1224692671618814U, (uint64_t)162359533289271U,
-    (uint64_t)282240927125249U, (uint64_t)586909166382289U, (uint64_t)17726488197838U,
-    (uint64_t)377014554985659U, (uint64_t)1433835303052512U, (uint64_t)702061469493692U,
-    (uint64_t)1142253108318154U, (uint64_t)318297794307551U, (uint64_t)954362646308543U,
-    (uint64_t)517363881452320U, (uint64_t)1868013482130416U, (uint64_t)262562472373260U,
-    (uint64_t)902232853249919U, (uint64_t)2107343057055746U, (uint64_t)462368348619024U,
-    (uint64_t)1893758677092974U, (uint64_t)2177729767846389U, (uint64_t)2168532543559143U,
-    (uint64_t)443867094639821U, (uint64_t)730169342581022U, (uint64_t)1564589016879755U,
-    (uint64_t)51218195700649U, (uint64_t)76684578423745U, (uint64_t)560266272480743U,
-    (uint64_t)922517457707697U, (uint64_t)2066645939860874U, (uint64_t)1318277348414638U,
-    (uint64_t)1576726809084003U, (uint64_t)1817337608563665U, (uint64_t)1874240939237666U,
-    (uint64_t)754733726333910U, (uint64_t)97085310406474U, (uint64_t)751148364309235U,
-    (uint64_t)1622159695715187U, (uint64_t)1444098819684916U, (uint64_t)130920805558089U,
-    (uint64_t)1260449179085308U, (uint64_t)1860021740768461U, (uint64_t)110052860348509U,
-    (uint64_t)193830891643810U, (uint64_t)164148413933881U, (uint64_t)180017794795332U,
-    (uint64_t)1523506525254651U, (uint64_t)465981629225956U, (uint64_t)559733514964572U,
-    (uint64_t)1279624874416974U, (uint64_t)2026642326892306U, (uint64_t)1425156829982409U,
-    (uint64_t)2160936383793147U, (uint64_t)1061870624975247U, (uint64_t)2023497043036941U,
-    (uint64_t)117942212883190U, (uint64_t)490339622800774U, (uint64_t)1729931303146295U,
-    (uint64_t)422305932971074U, (uint64_t)529103152793096U, (uint64_t)1211973233775992U,
-    (uint64_t)721364955929681U, (uint64_t)1497674430438813U, (uint64_t)342545521275073U,
-    (uint64_t)2102107575279372U, (uint64_t)2108462244669966U, (uint64_t)1382582406064082U,
-    (uint64_t)2206396818383323U, (uint64_t)2109093268641147U, (uint64_t)10809845110983U,
-    (uint64_t)1605176920880099U, (uint64_t)744640650753946U, (uint64_t)1712758897518129U,
-    (uint64_t)373410811281809U, (uint64_t)648838265800209U, (uint64_t)813058095530999U,
-    (uint64_t)513987632620169U, (uint64_t)465516160703329U, (uint64_t)2136322186126330U,
-    (uint64_t)1979645899422932U, (uint64_t)1197131006470786U, (uint64_t)1467836664863979U,
-    (uint64_t)1340751381374628U, (uint64_t)1810066212667962U, (uint64_t)1009933588225499U,
-    (uint64_t)1106129188080873U, (uint64_t)1388980405213901U, (uint64_t)533719246598044U,
-    (uint64_t)1169435803073277U, (uint64_t)198920999285821U, (uint64_t)487492330629854U,
-    (uint64_t)1807093008537778U, (uint64_t)1540899012923865U, (uint64_t)2075080271659867U,
-    (uint64_t)1527990806921523U, (uint64_t)1323728742908002U, (uint64_t)1568595959608205U,
-    (uint64_t)1388032187497212U, (uint64_t)2026968840050568U, (uint64_t)1396591153295755U,
-    (uint64_t)820416950170901U, (uint64_t)520060313205582U, (uint64_t)2016404325094901U,
-    (uint64_t)1584709677868520U, (uint64_t)272161374469956U, (uint64_t)1567188603996816U,
-    (uint64_t)1986160530078221U, (uint64_t)553930264324589U, (uint64_t)1058426729027503U,
-    (uint64_t)8762762886675U, (uint64_t)2216098143382988U, (uint64_t)1835145266889223U,
-    (uint64_t)1712936431558441U, (uint64_t)1017009937844974U, (uint64_t)585361667812740U,
-    (uint64_t)2114711541628181U, (uint64_t)2238729632971439U, (uint64_t)121257546253072U,
-    (uint64_t)847154149018345U, (uint64_t)211972965476684U, (uint64_t)287499084460129U,
-    (uint64_t)2098247259180197U, (uint64_t)839070411583329U, (uint64_t)339551619574372U,
-    (uint64_t)1432951287640743U, (uint64_t)526481249498942U, (uint64_t)931991661905195U,
-    (uint64_t)1884279965674487U, (uint64_t)200486405604411U, (uint64_t)364173020594788U,
-    (uint64_t)518034455936955U, (uint64_t)1085564703965501U, (uint64_t)16030410467927U,
-    (uint64_t)604865933167613U, (uint64_t)1695298441093964U, (uint64_t)498856548116159U,
-    (uint64_t)2193030062787034U, (uint64_t)1706339802964179U, (uint64_t)1721199073493888U,
-    (uint64_t)820740951039755U, (uint64_t)1216053436896834U, (uint64_t)23954895815139U,
-    (uint64_t)1662515208920491U, (uint64_t)1705443427511899U, (uint64_t)1957928899570365U,
-    (uint64_t)1189636258255725U, (uint64_t)1795695471103809U, (uint64_t)1691191297654118U,
-    (uint64_t)282402585374360U, (uint64_t)460405330264832U, (uint64_t)63765529445733U,
-    (uint64_t)469763447404473U, (uint64_t)733607089694996U, (uint64_t)685410420186959U,
-    (uint64_t)1096682630419738U, (uint64_t)1162548510542362U, (uint64_t)1020949526456676U,
-    (uint64_t)1211660396870573U, (uint64_t)613126398222696U, (uint64_t)1117829165843251U,
-    (uint64_t)742432540886650U, (uint64_t)1483755088010658U, (uint64_t)942392007134474U,
-    (uint64_t)1447834130944107U, (uint64_t)489368274863410U, (uint64_t)23192985544898U,
-    (uint64_t)648442406146160U, (uint64_t)785438843373876U, (uint64_t)249464684645238U,
-    (uint64_t)170494608205618U, (uint64_t)335112827260550U, (uint64_t)1462050123162735U,
-    (uint64_t)1084803668439016U, (uint64_t)853459233600325U, (uint64_t)215777728187495U,
-    (uint64_t)1965759433526974U, (uint64_t)1349482894446537U, (uint64_t)694163317612871U,
-    (uint64_t)860536766165036U, (uint64_t)1178788094084321U, (uint64_t)1652739626626996U,
-    (uint64_t)2115723946388185U, (uint64_t)1577204379094664U, (uint64_t)1083882859023240U,
-    (uint64_t)1768759143381635U, (uint64_t)1737180992507258U, (uint64_t)246054513922239U,
-    (uint64_t)577253134087234U, (uint64_t)356340280578042U, (uint64_t)1638917769925142U,
-    (uint64_t)223550348130103U, (uint64_t)470592666638765U, (uint64_t)22663573966996U,
-    (uint64_t)596552461152400U, (uint64_t)364143537069499U, (uint64_t)3942119457699U,
-    (uint64_t)107951982889287U, (uint64_t)1843471406713209U, (uint64_t)1625773041610986U,
-    (uint64_t)1466141092501702U, (uint64_t)1043024095021271U, (uint64_t)310429964047508U,
-    (uint64_t)98559121500372U, (uint64_t)152746933782868U, (uint64_t)259407205078261U,
-    (uint64_t)828123093322585U, (uint64_t)1576847274280091U, (uint64_t)1170871375757302U,
-    (uint64_t)1588856194642775U, (uint64_t)984767822341977U, (uint64_t)1141497997993760U,
-    (uint64_t)809325345150796U, (uint64_t)1879837728202511U, (uint64_t)201340910657893U,
-    (uint64_t)1079157558888483U, (uint64_t)1052373448588065U, (uint64_t)1732036202501778U,
-    (uint64_t)2105292670328445U, (uint64_t)679751387312402U, (uint64_t)1679682144926229U,
-    (uint64_t)1695823455818780U, (uint64_t)498852317075849U, (uint64_t)1786555067788433U,
-    (uint64_t)1670727545779425U, (uint64_t)117945875433544U, (uint64_t)407939139781844U,
-    (uint64_t)854632120023778U, (uint64_t)1413383148360437U, (uint64_t)286030901733673U,
-    (uint64_t)1207361858071196U, (uint64_t)461340408181417U, (uint64_t)1096919590360164U,
-    (uint64_t)1837594897475685U, (uint64_t)533755561544165U, (uint64_t)1638688042247712U,
-    (uint64_t)1431653684793005U, (uint64_t)1036458538873559U, (uint64_t)390822120341779U,
-    (uint64_t)1920929837111618U, (uint64_t)543426740024168U, (uint64_t)645751357799929U,
-    (uint64_t)2245025632994463U, (uint64_t)1550778638076452U, (uint64_t)223738153459949U,
-    (uint64_t)1337209385492033U, (uint64_t)1276967236456531U, (uint64_t)1463815821063071U,
-    (uint64_t)2070620870191473U, (uint64_t)1199170709413753U, (uint64_t)273230877394166U,
-    (uint64_t)1873264887608046U, (uint64_t)890877152910775U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1738742601995546ULL, 1146398526822698ULL, 2070867633025821ULL,
+    562264141797630ULL, 587772402128613ULL, 1801439850948184ULL, 1351079888211148ULL,
+    450359962737049ULL, 900719925474099ULL, 1801439850948198ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    1841354044333475ULL, 16398895984059ULL, 755974180946558ULL, 900171276175154ULL,
+    1821297809914039ULL, 1661154287933054ULL, 284530020860578ULL, 1390261174866914ULL,
+    1524110943907984ULL, 1045603498418422ULL, 928651508580478ULL, 1383326941296346ULL,
+    961937908925785ULL, 80455759693706ULL, 904734540352947ULL, 1507481815385608ULL,
+    2223447444246085ULL, 1083941587175919ULL, 2059929906842505ULL, 1581435440146976ULL,
+    782730187692425ULL, 9928394897574ULL, 1539449519985236ULL, 1923587931078510ULL,
+    552919286076056ULL, 376925408065760ULL, 447320488831784ULL, 1362918338468019ULL,
+    1470031896696846ULL, 2189796996539902ULL, 1337552949959847ULL, 1762287177775726ULL,
+    237994495816815ULL, 1277840395970544ULL, 543972849007241ULL, 1224692671618814ULL,
+    162359533289271ULL, 282240927125249ULL, 586909166382289ULL, 17726488197838ULL,
+    377014554985659ULL, 1433835303052512ULL, 702061469493692ULL, 1142253108318154ULL,
+    318297794307551ULL, 954362646308543ULL, 517363881452320ULL, 1868013482130416ULL,
+    262562472373260ULL, 902232853249919ULL, 2107343057055746ULL, 462368348619024ULL,
+    1893758677092974ULL, 2177729767846389ULL, 2168532543559143ULL, 443867094639821ULL,
+    730169342581022ULL, 1564589016879755ULL, 51218195700649ULL, 76684578423745ULL,
+    560266272480743ULL, 922517457707697ULL, 2066645939860874ULL, 1318277348414638ULL,
+    1576726809084003ULL, 1817337608563665ULL, 1874240939237666ULL, 754733726333910ULL,
+    97085310406474ULL, 751148364309235ULL, 1622159695715187ULL, 1444098819684916ULL,
+    130920805558089ULL, 1260449179085308ULL, 1860021740768461ULL, 110052860348509ULL,
+    193830891643810ULL, 164148413933881ULL, 180017794795332ULL, 1523506525254651ULL,
+    465981629225956ULL, 559733514964572ULL, 1279624874416974ULL, 2026642326892306ULL,
+    1425156829982409ULL, 2160936383793147ULL, 1061870624975247ULL, 2023497043036941ULL,
+    117942212883190ULL, 490339622800774ULL, 1729931303146295ULL, 422305932971074ULL,
+    529103152793096ULL, 1211973233775992ULL, 721364955929681ULL, 1497674430438813ULL,
+    342545521275073ULL, 2102107575279372ULL, 2108462244669966ULL, 1382582406064082ULL,
+    2206396818383323ULL, 2109093268641147ULL, 10809845110983ULL, 1605176920880099ULL,
+    744640650753946ULL, 1712758897518129ULL, 373410811281809ULL, 648838265800209ULL,
+    813058095530999ULL, 513987632620169ULL, 465516160703329ULL, 2136322186126330ULL,
+    1979645899422932ULL, 1197131006470786ULL, 1467836664863979ULL, 1340751381374628ULL,
+    1810066212667962ULL, 1009933588225499ULL, 1106129188080873ULL, 1388980405213901ULL,
+    533719246598044ULL, 1169435803073277ULL, 198920999285821ULL, 487492330629854ULL,
+    1807093008537778ULL, 1540899012923865ULL, 2075080271659867ULL, 1527990806921523ULL,
+    1323728742908002ULL, 1568595959608205ULL, 1388032187497212ULL, 2026968840050568ULL,
+    1396591153295755ULL, 820416950170901ULL, 520060313205582ULL, 2016404325094901ULL,
+    1584709677868520ULL, 272161374469956ULL, 1567188603996816ULL, 1986160530078221ULL,
+    553930264324589ULL, 1058426729027503ULL, 8762762886675ULL, 2216098143382988ULL,
+    1835145266889223ULL, 1712936431558441ULL, 1017009937844974ULL, 585361667812740ULL,
+    2114711541628181ULL, 2238729632971439ULL, 121257546253072ULL, 847154149018345ULL,
+    211972965476684ULL, 287499084460129ULL, 2098247259180197ULL, 839070411583329ULL,
+    339551619574372ULL, 1432951287640743ULL, 526481249498942ULL, 931991661905195ULL,
+    1884279965674487ULL, 200486405604411ULL, 364173020594788ULL, 518034455936955ULL,
+    1085564703965501ULL, 16030410467927ULL, 604865933167613ULL, 1695298441093964ULL,
+    498856548116159ULL, 2193030062787034ULL, 1706339802964179ULL, 1721199073493888ULL,
+    820740951039755ULL, 1216053436896834ULL, 23954895815139ULL, 1662515208920491ULL,
+    1705443427511899ULL, 1957928899570365ULL, 1189636258255725ULL, 1795695471103809ULL,
+    1691191297654118ULL, 282402585374360ULL, 460405330264832ULL, 63765529445733ULL,
+    469763447404473ULL, 733607089694996ULL, 685410420186959ULL, 1096682630419738ULL,
+    1162548510542362ULL, 1020949526456676ULL, 1211660396870573ULL, 613126398222696ULL,
+    1117829165843251ULL, 742432540886650ULL, 1483755088010658ULL, 942392007134474ULL,
+    1447834130944107ULL, 489368274863410ULL, 23192985544898ULL, 648442406146160ULL,
+    785438843373876ULL, 249464684645238ULL, 170494608205618ULL, 335112827260550ULL,
+    1462050123162735ULL, 1084803668439016ULL, 853459233600325ULL, 215777728187495ULL,
+    1965759433526974ULL, 1349482894446537ULL, 694163317612871ULL, 860536766165036ULL,
+    1178788094084321ULL, 1652739626626996ULL, 2115723946388185ULL, 1577204379094664ULL,
+    1083882859023240ULL, 1768759143381635ULL, 1737180992507258ULL, 246054513922239ULL,
+    577253134087234ULL, 356340280578042ULL, 1638917769925142ULL, 223550348130103ULL,
+    470592666638765ULL, 22663573966996ULL, 596552461152400ULL, 364143537069499ULL, 3942119457699ULL,
+    107951982889287ULL, 1843471406713209ULL, 1625773041610986ULL, 1466141092501702ULL,
+    1043024095021271ULL, 310429964047508ULL, 98559121500372ULL, 152746933782868ULL,
+    259407205078261ULL, 828123093322585ULL, 1576847274280091ULL, 1170871375757302ULL,
+    1588856194642775ULL, 984767822341977ULL, 1141497997993760ULL, 809325345150796ULL,
+    1879837728202511ULL, 201340910657893ULL, 1079157558888483ULL, 1052373448588065ULL,
+    1732036202501778ULL, 2105292670328445ULL, 679751387312402ULL, 1679682144926229ULL,
+    1695823455818780ULL, 498852317075849ULL, 1786555067788433ULL, 1670727545779425ULL,
+    117945875433544ULL, 407939139781844ULL, 854632120023778ULL, 1413383148360437ULL,
+    286030901733673ULL, 1207361858071196ULL, 461340408181417ULL, 1096919590360164ULL,
+    1837594897475685ULL, 533755561544165ULL, 1638688042247712ULL, 1431653684793005ULL,
+    1036458538873559ULL, 390822120341779ULL, 1920929837111618ULL, 543426740024168ULL,
+    645751357799929ULL, 2245025632994463ULL, 1550778638076452ULL, 223738153459949ULL,
+    1337209385492033ULL, 1276967236456531ULL, 1463815821063071ULL, 2070620870191473ULL,
+    1199170709413753ULL, 273230877394166ULL, 1873264887608046ULL, 890877152910775ULL
   };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_g_pow2_64_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)13559344787725U, (uint64_t)2051621493703448U,
-    (uint64_t)1947659315640708U, (uint64_t)626856790370168U, (uint64_t)1592804284034836U,
-    (uint64_t)1781728767459187U, (uint64_t)278818420518009U, (uint64_t)2038030359908351U,
-    (uint64_t)910625973862690U, (uint64_t)471887343142239U, (uint64_t)1298543306606048U,
-    (uint64_t)794147365642417U, (uint64_t)129968992326749U, (uint64_t)523140861678572U,
-    (uint64_t)1166419653909231U, (uint64_t)2009637196928390U, (uint64_t)1288020222395193U,
-    (uint64_t)1007046974985829U, (uint64_t)208981102651386U, (uint64_t)2074009315253380U,
-    (uint64_t)1564056062071967U, (uint64_t)276822668750618U, (uint64_t)206621292512572U,
-    (uint64_t)470304361809269U, (uint64_t)895215438398493U, (uint64_t)1527859053868686U,
-    (uint64_t)1624967223409369U, (uint64_t)811821865979736U, (uint64_t)350450534838340U,
-    (uint64_t)219143807921807U, (uint64_t)507994540371254U, (uint64_t)986513794574720U,
-    (uint64_t)1142661369967121U, (uint64_t)621278293399257U, (uint64_t)556189161519781U,
-    (uint64_t)351964007865066U, (uint64_t)2011573453777822U, (uint64_t)1367125527151537U,
-    (uint64_t)1691316722438196U, (uint64_t)731328817345164U, (uint64_t)1284781192709232U,
-    (uint64_t)478439299539269U, (uint64_t)204842178076429U, (uint64_t)2085125369913651U,
-    (uint64_t)1980773492792985U, (uint64_t)1480264409524940U, (uint64_t)688389585376233U,
-    (uint64_t)612962643526972U, (uint64_t)165595382536676U, (uint64_t)1850300069212263U,
-    (uint64_t)1176357203491551U, (uint64_t)1880164984292321U, (uint64_t)10786153104736U,
-    (uint64_t)1242293560510203U, (uint64_t)1358399951884084U, (uint64_t)1901358796610357U,
-    (uint64_t)1385092558795806U, (uint64_t)1734893785311348U, (uint64_t)2046201851951191U,
-    (uint64_t)1233811309557352U, (uint64_t)1531160168656129U, (uint64_t)1543287181303358U,
-    (uint64_t)516121446374119U, (uint64_t)723422668089935U, (uint64_t)1228176774959679U,
-    (uint64_t)1598014722726267U, (uint64_t)1630810326658412U, (uint64_t)1343833067463760U,
-    (uint64_t)1024397964362099U, (uint64_t)1157142161346781U, (uint64_t)56422174971792U,
-    (uint64_t)544901687297092U, (uint64_t)1291559028869009U, (uint64_t)1336918672345120U,
-    (uint64_t)1390874603281353U, (uint64_t)1127199512010904U, (uint64_t)992644979940964U,
-    (uint64_t)1035213479783573U, (uint64_t)36043651196100U, (uint64_t)1220961519321221U,
-    (uint64_t)1348190007756977U, (uint64_t)579420200329088U, (uint64_t)1703819961008985U,
-    (uint64_t)1993919213460047U, (uint64_t)2225080008232251U, (uint64_t)392785893702372U,
-    (uint64_t)464312521482632U, (uint64_t)1224525362116057U, (uint64_t)810394248933036U,
-    (uint64_t)932513521649107U, (uint64_t)592314953488703U, (uint64_t)586334603791548U,
-    (uint64_t)1310888126096549U, (uint64_t)650842674074281U, (uint64_t)1596447001791059U,
-    (uint64_t)2086767406328284U, (uint64_t)1866377645879940U, (uint64_t)1721604362642743U,
-    (uint64_t)738502322566890U, (uint64_t)1851901097729689U, (uint64_t)1158347571686914U,
-    (uint64_t)2023626733470827U, (uint64_t)329625404653699U, (uint64_t)563555875598551U,
-    (uint64_t)516554588079177U, (uint64_t)1134688306104598U, (uint64_t)186301198420809U,
-    (uint64_t)1339952213563300U, (uint64_t)643605614625891U, (uint64_t)1947505332718043U,
-    (uint64_t)1722071694852824U, (uint64_t)601679570440694U, (uint64_t)1821275721236351U,
-    (uint64_t)1808307842870389U, (uint64_t)1654165204015635U, (uint64_t)1457334100715245U,
-    (uint64_t)217784948678349U, (uint64_t)1820622417674817U, (uint64_t)1946121178444661U,
-    (uint64_t)597980757799332U, (uint64_t)1745271227710764U, (uint64_t)2010952890941980U,
-    (uint64_t)339811849696648U, (uint64_t)1066120666993872U, (uint64_t)261276166508990U,
-    (uint64_t)323098645774553U, (uint64_t)207454744271283U, (uint64_t)941448672977675U,
-    (uint64_t)71890920544375U, (uint64_t)840849789313357U, (uint64_t)1223996070717926U,
-    (uint64_t)196832550853408U, (uint64_t)115986818309231U, (uint64_t)1586171527267675U,
-    (uint64_t)1666169080973450U, (uint64_t)1456454731176365U, (uint64_t)44467854369003U,
-    (uint64_t)2149656190691480U, (uint64_t)283446383597589U, (uint64_t)2040542647729974U,
-    (uint64_t)305705593840224U, (uint64_t)475315822269791U, (uint64_t)648133452550632U,
-    (uint64_t)169218658835720U, (uint64_t)24960052338251U, (uint64_t)938907951346766U,
-    (uint64_t)425970950490510U, (uint64_t)1037622011013183U, (uint64_t)1026882082708180U,
-    (uint64_t)1635699409504916U, (uint64_t)1644776942870488U, (uint64_t)2151820331175914U,
-    (uint64_t)824120674069819U, (uint64_t)835744976610113U, (uint64_t)1991271032313190U,
-    (uint64_t)96507354724855U, (uint64_t)400645405133260U, (uint64_t)343728076650825U,
-    (uint64_t)1151585441385566U, (uint64_t)1403339955333520U, (uint64_t)230186314139774U,
-    (uint64_t)1736248861506714U, (uint64_t)1010804378904572U, (uint64_t)1394932289845636U,
-    (uint64_t)1901351256960852U, (uint64_t)2187471430089807U, (uint64_t)1003853262342670U,
-    (uint64_t)1327743396767461U, (uint64_t)1465160415991740U, (uint64_t)366625359144534U,
-    (uint64_t)1534791405247604U, (uint64_t)1790905930250187U, (uint64_t)1255484115292738U,
-    (uint64_t)2223291365520443U, (uint64_t)210967717407408U, (uint64_t)26722916813442U,
-    (uint64_t)1919574361907910U, (uint64_t)468825088280256U, (uint64_t)2230011775946070U,
-    (uint64_t)1628365642214479U, (uint64_t)568871869234932U, (uint64_t)1066987968780488U,
-    (uint64_t)1692242903745558U, (uint64_t)1678903997328589U, (uint64_t)214262165888021U,
-    (uint64_t)1929686748607204U, (uint64_t)1790138967989670U, (uint64_t)1790261616022076U,
-    (uint64_t)1559824537553112U, (uint64_t)1230364591311358U, (uint64_t)147531939886346U,
-    (uint64_t)1528207085815487U, (uint64_t)477957922927292U, (uint64_t)285670243881618U,
-    (uint64_t)264430080123332U, (uint64_t)1163108160028611U, (uint64_t)373201522147371U,
-    (uint64_t)34903775270979U, (uint64_t)1750870048600662U, (uint64_t)1319328308741084U,
-    (uint64_t)1547548634278984U, (uint64_t)1691259592202927U, (uint64_t)2247758037259814U,
-    (uint64_t)329611399953677U, (uint64_t)1385555496268877U, (uint64_t)2242438354031066U,
-    (uint64_t)1329523854843632U, (uint64_t)399895373846055U, (uint64_t)678005703193452U,
-    (uint64_t)1496357700997771U, (uint64_t)71909969781942U, (uint64_t)1515391418612349U,
-    (uint64_t)470110837888178U, (uint64_t)1981307309417466U, (uint64_t)1259888737412276U,
-    (uint64_t)669991710228712U, (uint64_t)1048546834514303U, (uint64_t)1678323291295512U,
-    (uint64_t)2172033978088071U, (uint64_t)1529278455500556U, (uint64_t)901984601941894U,
-    (uint64_t)780867622403807U, (uint64_t)550105677282793U, (uint64_t)975860231176136U,
-    (uint64_t)525188281689178U, (uint64_t)49966114807992U, (uint64_t)1776449263836645U,
-    (uint64_t)267851776380338U, (uint64_t)2225969494054620U, (uint64_t)2016794225789822U,
-    (uint64_t)1186108678266608U, (uint64_t)1023083271408882U, (uint64_t)1119289418565906U,
-    (uint64_t)1248185897348801U, (uint64_t)1846081539082697U, (uint64_t)23756429626075U,
-    (uint64_t)1441999021105403U, (uint64_t)724497586552825U, (uint64_t)1287761623605379U,
-    (uint64_t)685303359654224U, (uint64_t)2217156930690570U, (uint64_t)163769288918347U,
-    (uint64_t)1098423278284094U, (uint64_t)1391470723006008U, (uint64_t)570700152353516U,
-    (uint64_t)744804507262556U, (uint64_t)2200464788609495U, (uint64_t)624141899161992U,
-    (uint64_t)2249570166275684U, (uint64_t)378706441983561U, (uint64_t)122486379999375U,
-    (uint64_t)430741162798924U, (uint64_t)113847463452574U, (uint64_t)266250457840685U,
-    (uint64_t)2120743625072743U, (uint64_t)222186221043927U, (uint64_t)1964290018305582U,
-    (uint64_t)1435278008132477U, (uint64_t)1670867456663734U, (uint64_t)2009989552599079U,
-    (uint64_t)1348024113448744U, (uint64_t)1158423886300455U, (uint64_t)1356467152691569U,
-    (uint64_t)306943042363674U, (uint64_t)926879628664255U, (uint64_t)1349295689598324U,
-    (uint64_t)725558330071205U, (uint64_t)536569987519948U, (uint64_t)116436990335366U,
-    (uint64_t)1551888573800376U, (uint64_t)2044698345945451U, (uint64_t)104279940291311U,
-    (uint64_t)251526570943220U, (uint64_t)754735828122925U, (uint64_t)33448073576361U,
-    (uint64_t)994605876754543U, (uint64_t)546007584022006U, (uint64_t)2217332798409487U,
-    (uint64_t)706477052561591U, (uint64_t)131174619428653U, (uint64_t)2148698284087243U,
-    (uint64_t)239290486205186U, (uint64_t)2161325796952184U, (uint64_t)1713452845607994U,
-    (uint64_t)1297861562938913U, (uint64_t)1779539876828514U, (uint64_t)1926559018603871U,
-    (uint64_t)296485747893968U, (uint64_t)1859208206640686U, (uint64_t)538513979002718U,
-    (uint64_t)103998826506137U, (uint64_t)2025375396538469U, (uint64_t)1370680785701206U,
-    (uint64_t)1698557311253840U, (uint64_t)1411096399076595U, (uint64_t)2132580530813677U,
-    (uint64_t)2071564345845035U, (uint64_t)498581428556735U, (uint64_t)1136010486691371U,
-    (uint64_t)1927619356993146U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 13559344787725ULL, 2051621493703448ULL, 1947659315640708ULL,
+    626856790370168ULL, 1592804284034836ULL, 1781728767459187ULL, 278818420518009ULL,
+    2038030359908351ULL, 910625973862690ULL, 471887343142239ULL, 1298543306606048ULL,
+    794147365642417ULL, 129968992326749ULL, 523140861678572ULL, 1166419653909231ULL,
+    2009637196928390ULL, 1288020222395193ULL, 1007046974985829ULL, 208981102651386ULL,
+    2074009315253380ULL, 1564056062071967ULL, 276822668750618ULL, 206621292512572ULL,
+    470304361809269ULL, 895215438398493ULL, 1527859053868686ULL, 1624967223409369ULL,
+    811821865979736ULL, 350450534838340ULL, 219143807921807ULL, 507994540371254ULL,
+    986513794574720ULL, 1142661369967121ULL, 621278293399257ULL, 556189161519781ULL,
+    351964007865066ULL, 2011573453777822ULL, 1367125527151537ULL, 1691316722438196ULL,
+    731328817345164ULL, 1284781192709232ULL, 478439299539269ULL, 204842178076429ULL,
+    2085125369913651ULL, 1980773492792985ULL, 1480264409524940ULL, 688389585376233ULL,
+    612962643526972ULL, 165595382536676ULL, 1850300069212263ULL, 1176357203491551ULL,
+    1880164984292321ULL, 10786153104736ULL, 1242293560510203ULL, 1358399951884084ULL,
+    1901358796610357ULL, 1385092558795806ULL, 1734893785311348ULL, 2046201851951191ULL,
+    1233811309557352ULL, 1531160168656129ULL, 1543287181303358ULL, 516121446374119ULL,
+    723422668089935ULL, 1228176774959679ULL, 1598014722726267ULL, 1630810326658412ULL,
+    1343833067463760ULL, 1024397964362099ULL, 1157142161346781ULL, 56422174971792ULL,
+    544901687297092ULL, 1291559028869009ULL, 1336918672345120ULL, 1390874603281353ULL,
+    1127199512010904ULL, 992644979940964ULL, 1035213479783573ULL, 36043651196100ULL,
+    1220961519321221ULL, 1348190007756977ULL, 579420200329088ULL, 1703819961008985ULL,
+    1993919213460047ULL, 2225080008232251ULL, 392785893702372ULL, 464312521482632ULL,
+    1224525362116057ULL, 810394248933036ULL, 932513521649107ULL, 592314953488703ULL,
+    586334603791548ULL, 1310888126096549ULL, 650842674074281ULL, 1596447001791059ULL,
+    2086767406328284ULL, 1866377645879940ULL, 1721604362642743ULL, 738502322566890ULL,
+    1851901097729689ULL, 1158347571686914ULL, 2023626733470827ULL, 329625404653699ULL,
+    563555875598551ULL, 516554588079177ULL, 1134688306104598ULL, 186301198420809ULL,
+    1339952213563300ULL, 643605614625891ULL, 1947505332718043ULL, 1722071694852824ULL,
+    601679570440694ULL, 1821275721236351ULL, 1808307842870389ULL, 1654165204015635ULL,
+    1457334100715245ULL, 217784948678349ULL, 1820622417674817ULL, 1946121178444661ULL,
+    597980757799332ULL, 1745271227710764ULL, 2010952890941980ULL, 339811849696648ULL,
+    1066120666993872ULL, 261276166508990ULL, 323098645774553ULL, 207454744271283ULL,
+    941448672977675ULL, 71890920544375ULL, 840849789313357ULL, 1223996070717926ULL,
+    196832550853408ULL, 115986818309231ULL, 1586171527267675ULL, 1666169080973450ULL,
+    1456454731176365ULL, 44467854369003ULL, 2149656190691480ULL, 283446383597589ULL,
+    2040542647729974ULL, 305705593840224ULL, 475315822269791ULL, 648133452550632ULL,
+    169218658835720ULL, 24960052338251ULL, 938907951346766ULL, 425970950490510ULL,
+    1037622011013183ULL, 1026882082708180ULL, 1635699409504916ULL, 1644776942870488ULL,
+    2151820331175914ULL, 824120674069819ULL, 835744976610113ULL, 1991271032313190ULL,
+    96507354724855ULL, 400645405133260ULL, 343728076650825ULL, 1151585441385566ULL,
+    1403339955333520ULL, 230186314139774ULL, 1736248861506714ULL, 1010804378904572ULL,
+    1394932289845636ULL, 1901351256960852ULL, 2187471430089807ULL, 1003853262342670ULL,
+    1327743396767461ULL, 1465160415991740ULL, 366625359144534ULL, 1534791405247604ULL,
+    1790905930250187ULL, 1255484115292738ULL, 2223291365520443ULL, 210967717407408ULL,
+    26722916813442ULL, 1919574361907910ULL, 468825088280256ULL, 2230011775946070ULL,
+    1628365642214479ULL, 568871869234932ULL, 1066987968780488ULL, 1692242903745558ULL,
+    1678903997328589ULL, 214262165888021ULL, 1929686748607204ULL, 1790138967989670ULL,
+    1790261616022076ULL, 1559824537553112ULL, 1230364591311358ULL, 147531939886346ULL,
+    1528207085815487ULL, 477957922927292ULL, 285670243881618ULL, 264430080123332ULL,
+    1163108160028611ULL, 373201522147371ULL, 34903775270979ULL, 1750870048600662ULL,
+    1319328308741084ULL, 1547548634278984ULL, 1691259592202927ULL, 2247758037259814ULL,
+    329611399953677ULL, 1385555496268877ULL, 2242438354031066ULL, 1329523854843632ULL,
+    399895373846055ULL, 678005703193452ULL, 1496357700997771ULL, 71909969781942ULL,
+    1515391418612349ULL, 470110837888178ULL, 1981307309417466ULL, 1259888737412276ULL,
+    669991710228712ULL, 1048546834514303ULL, 1678323291295512ULL, 2172033978088071ULL,
+    1529278455500556ULL, 901984601941894ULL, 780867622403807ULL, 550105677282793ULL,
+    975860231176136ULL, 525188281689178ULL, 49966114807992ULL, 1776449263836645ULL,
+    267851776380338ULL, 2225969494054620ULL, 2016794225789822ULL, 1186108678266608ULL,
+    1023083271408882ULL, 1119289418565906ULL, 1248185897348801ULL, 1846081539082697ULL,
+    23756429626075ULL, 1441999021105403ULL, 724497586552825ULL, 1287761623605379ULL,
+    685303359654224ULL, 2217156930690570ULL, 163769288918347ULL, 1098423278284094ULL,
+    1391470723006008ULL, 570700152353516ULL, 744804507262556ULL, 2200464788609495ULL,
+    624141899161992ULL, 2249570166275684ULL, 378706441983561ULL, 122486379999375ULL,
+    430741162798924ULL, 113847463452574ULL, 266250457840685ULL, 2120743625072743ULL,
+    222186221043927ULL, 1964290018305582ULL, 1435278008132477ULL, 1670867456663734ULL,
+    2009989552599079ULL, 1348024113448744ULL, 1158423886300455ULL, 1356467152691569ULL,
+    306943042363674ULL, 926879628664255ULL, 1349295689598324ULL, 725558330071205ULL,
+    536569987519948ULL, 116436990335366ULL, 1551888573800376ULL, 2044698345945451ULL,
+    104279940291311ULL, 251526570943220ULL, 754735828122925ULL, 33448073576361ULL,
+    994605876754543ULL, 546007584022006ULL, 2217332798409487ULL, 706477052561591ULL,
+    131174619428653ULL, 2148698284087243ULL, 239290486205186ULL, 2161325796952184ULL,
+    1713452845607994ULL, 1297861562938913ULL, 1779539876828514ULL, 1926559018603871ULL,
+    296485747893968ULL, 1859208206640686ULL, 538513979002718ULL, 103998826506137ULL,
+    2025375396538469ULL, 1370680785701206ULL, 1698557311253840ULL, 1411096399076595ULL,
+    2132580530813677ULL, 2071564345845035ULL, 498581428556735ULL, 1136010486691371ULL,
+    1927619356993146ULL
   };
 
static const
uint64_t
Hacl_Ed25519_PrecompTable_precomp_g_pow2_128_table_w4[320U] =
  {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)557549315715710U, (uint64_t)196756086293855U,
-    (uint64_t)846062225082495U, (uint64_t)1865068224838092U, (uint64_t)991112090754908U,
-    (uint64_t)522916421512828U, (uint64_t)2098523346722375U, (uint64_t)1135633221747012U,
-    (uint64_t)858420432114866U, (uint64_t)186358544306082U, (uint64_t)1044420411868480U,
-    (uint64_t)2080052304349321U, (uint64_t)557301814716724U, (uint64_t)1305130257814057U,
-    (uint64_t)2126012765451197U, (uint64_t)1441004402875101U, (uint64_t)353948968859203U,
-    (uint64_t)470765987164835U, (uint64_t)1507675957683570U, (uint64_t)1086650358745097U,
-    (uint64_t)1911913434398388U, (uint64_t)66086091117182U, (uint64_t)1137511952425971U,
-    (uint64_t)36958263512141U, (uint64_t)2193310025325256U, (uint64_t)1085191426269045U,
-    (uint64_t)1232148267909446U, (uint64_t)1449894406170117U, (uint64_t)1241416717139557U,
-    (uint64_t)1940876999212868U, (uint64_t)829758415918121U, (uint64_t)309608450373449U,
-    (uint64_t)2228398547683851U, (uint64_t)1580623271960188U, (uint64_t)1675601502456740U,
-    (uint64_t)1360363115493548U, (uint64_t)1098397313096815U, (uint64_t)1809255384359797U,
-    (uint64_t)1458261916834384U, (uint64_t)210682545649705U, (uint64_t)1606836641068115U,
-    (uint64_t)1230478270405318U, (uint64_t)1843192771547802U, (uint64_t)1794596343564051U,
-    (uint64_t)229060710252162U, (uint64_t)2169742775467181U, (uint64_t)701467067318072U,
-    (uint64_t)696018499035555U, (uint64_t)521051885339807U, (uint64_t)158329567901874U,
-    (uint64_t)740426481832143U, (uint64_t)1369811177301441U, (uint64_t)503351589084015U,
-    (uint64_t)1781114827942261U, (uint64_t)1650493549693035U, (uint64_t)2174562418345156U,
-    (uint64_t)456517194809244U, (uint64_t)2052761522121179U, (uint64_t)2233342271123682U,
-    (uint64_t)1445872925177435U, (uint64_t)1131882576902813U, (uint64_t)220765848055241U,
-    (uint64_t)1280259961403769U, (uint64_t)1581497080160712U, (uint64_t)1477441080108824U,
-    (uint64_t)218428165202767U, (uint64_t)1970598141278907U, (uint64_t)643366736173069U,
-    (uint64_t)2167909426804014U, (uint64_t)834993711408259U, (uint64_t)1922437166463212U,
-    (uint64_t)1900036281472252U, (uint64_t)513794844386304U, (uint64_t)1297904164900114U,
-    (uint64_t)1147626295373268U, (uint64_t)1910101606251299U, (uint64_t)182933838633381U,
-    (uint64_t)806229530787362U, (uint64_t)155511666433200U, (uint64_t)290522463375462U,
-    (uint64_t)534373523491751U, (uint64_t)1302938814480515U, (uint64_t)1664979184120445U,
-    (uint64_t)304235649499423U, (uint64_t)339284524318609U, (uint64_t)1881717946973483U,
-    (uint64_t)1670802286833842U, (uint64_t)2223637120675737U, (uint64_t)135818919485814U,
-    (uint64_t)1144856572842792U, (uint64_t)2234981613434386U, (uint64_t)963917024969826U,
-    (uint64_t)402275378284993U, (uint64_t)141532417412170U, (uint64_t)921537468739387U,
-    (uint64_t)963905069722607U, (uint64_t)1405442890733358U, (uint64_t)1567763927164655U,
-    (uint64_t)1664776329195930U, (uint64_t)2095924165508507U, (uint64_t)994243110271379U,
-    (uint64_t)1243925610609353U, (uint64_t)1029845815569727U, (uint64_t)1001968867985629U,
-    (uint64_t)170368934002484U, (uint64_t)1100906131583801U, (uint64_t)1825190326449569U,
-    (uint64_t)1462285121182096U, (uint64_t)1545240767016377U, (uint64_t)797859025652273U,
-    (uint64_t)1062758326657530U, (uint64_t)1125600735118266U, (uint64_t)739325756774527U,
-    (uint64_t)1420144485966996U, (uint64_t)1915492743426702U, (uint64_t)752968196344993U,
-    (uint64_t)882156396938351U, (uint64_t)1909097048763227U, (uint64_t)849058590685611U,
-    (uint64_t)840754951388500U, (uint64_t)1832926948808323U, (uint64_t)2023317100075297U,
-    (uint64_t)322382745442827U, (uint64_t)1569741341737601U, (uint64_t)1678986113194987U,
-    (uint64_t)757598994581938U, (uint64_t)29678659580705U, (uint64_t)1239680935977986U,
-    (uint64_t)1509239427168474U, (uint64_t)1055981929287006U, (uint64_t)1894085471158693U,
-    (uint64_t)916486225488490U, (uint64_t)642168890366120U, (uint64_t)300453362620010U,
-    (uint64_t)1858797242721481U, (uint64_t)2077989823177130U, (uint64_t)510228455273334U,
-    (uint64_t)1473284798689270U, (uint64_t)5173934574301U, (uint64_t)765285232030050U,
-    (uint64_t)1007154707631065U, (uint64_t)1862128712885972U, (uint64_t)168873464821340U,
-    (uint64_t)1967853269759318U, (uint64_t)1489896018263031U, (uint64_t)592451806166369U,
-    (uint64_t)1242298565603883U, (uint64_t)1838918921339058U, (uint64_t)697532763910695U,
-    (uint64_t)294335466239059U, (uint64_t)135687058387449U, (uint64_t)2133734403874176U,
-    (uint64_t)2121911143127699U, (uint64_t)20222476737364U, (uint64_t)1200824626476747U,
-    (uint64_t)1397731736540791U, (uint64_t)702378430231418U, (uint64_t)59059527640068U,
-    (uint64_t)460992547183981U, (uint64_t)1016125857842765U, (uint64_t)1273530839608957U,
-    (uint64_t)96724128829301U, (uint64_t)1313433042425233U, (uint64_t)3543822857227U,
-    (uint64_t)761975685357118U, (uint64_t)110417360745248U, (uint64_t)1079634164577663U,
-    (uint64_t)2044574510020457U, (uint64_t)338709058603120U, (uint64_t)94541336042799U,
-    (uint64_t)127963233585039U, (uint64_t)94427896272258U, (uint64_t)1143501979342182U,
-    (uint64_t)1217958006212230U, (uint64_t)2153887831492134U, (uint64_t)1519219513255575U,
-    (uint64_t)251793195454181U, (uint64_t)392517349345200U, (uint64_t)1507033011868881U,
-    (uint64_t)2208494254670752U, (uint64_t)1364389582694359U, (uint64_t)2214069430728063U,
-    (uint64_t)1272814257105752U, (uint64_t)741450148906352U, (uint64_t)1105776675555685U,
-    (uint64_t)824447222014984U, (uint64_t)528745219306376U, (uint64_t)589427609121575U,
-    (uint64_t)1501786838809155U, (uint64_t)379067373073147U, (uint64_t)184909476589356U,
-    (uint64_t)1346887560616185U, (uint64_t)1932023742314082U, (uint64_t)1633302311869264U,
-    (uint64_t)1685314821133069U, (uint64_t)1836610282047884U, (uint64_t)1595571594397150U,
-    (uint64_t)615441688872198U, (uint64_t)1926435616702564U, (uint64_t)235632180396480U,
-    (uint64_t)1051918343571810U, (uint64_t)2150570051687050U, (uint64_t)879198845408738U,
-    (uint64_t)1443966275205464U, (uint64_t)481362545245088U, (uint64_t)512807443532642U,
-    (uint64_t)641147578283480U, (uint64_t)1594276116945596U, (uint64_t)1844812743300602U,
-    (uint64_t)2044559316019485U, (uint64_t)202620777969020U, (uint64_t)852992984136302U,
-    (uint64_t)1500869642692910U, (uint64_t)1085216217052457U, (uint64_t)1736294372259758U,
-    (uint64_t)2009666354486552U, (uint64_t)1262389020715248U, (uint64_t)1166527705256867U,
-    (uint64_t)1409917450806036U, (uint64_t)1705819160057637U, (uint64_t)1116901782584378U,
-    (uint64_t)1278460472285473U, (uint64_t)257879811360157U, (uint64_t)40314007176886U,
-    (uint64_t)701309846749639U, (uint64_t)1380457676672777U, (uint64_t)631519782380272U,
-    (uint64_t)1196339573466793U, (uint64_t)955537708940017U, (uint64_t)532725633381530U,
-    (uint64_t)641190593731833U, (uint64_t)7214357153807U, (uint64_t)481922072107983U,
-    (uint64_t)1634886189207352U, (uint64_t)1247659758261633U, (uint64_t)1655809614786430U,
-    (uint64_t)43105797900223U, (uint64_t)76205809912607U, (uint64_t)1936575107455823U,
-    (uint64_t)1107927314642236U, (uint64_t)2199986333469333U, (uint64_t)802974829322510U,
-    (uint64_t)718173128143482U, (uint64_t)539385184235615U, (uint64_t)2075693785611221U,
-    (uint64_t)953281147333690U, (uint64_t)1623571637172587U, (uint64_t)655274535022250U,
-    (uint64_t)1568078078819021U, (uint64_t)101142125049712U, (uint64_t)1488441673350881U,
-    (uint64_t)1457969561944515U, (uint64_t)1492622544287712U, (uint64_t)2041460689280803U,
-    (uint64_t)1961848091392887U, (uint64_t)461003520846938U, (uint64_t)934728060399807U,
-    (uint64_t)117723291519705U, (uint64_t)1027773762863526U, (uint64_t)56765304991567U,
-    (uint64_t)2184028379550479U, (uint64_t)1768767711894030U, (uint64_t)1304432068983172U,
-    (uint64_t)498080974452325U, (uint64_t)2134905654858163U, (uint64_t)1446137427202647U,
-    (uint64_t)551613831549590U, (uint64_t)680288767054205U, (uint64_t)1278113339140386U,
-    (uint64_t)378149431842614U, (uint64_t)80520494426960U, (uint64_t)2080985256348782U,
-    (uint64_t)673432591799820U, (uint64_t)739189463724560U, (uint64_t)1847191452197509U,
-    (uint64_t)527737312871602U, (uint64_t)477609358840073U, (uint64_t)1891633072677946U,
-    (uint64_t)1841456828278466U, (uint64_t)2242502936489002U, (uint64_t)524791829362709U,
-    (uint64_t)276648168514036U, (uint64_t)991706903257619U, (uint64_t)512580228297906U,
-    (uint64_t)1216855104975946U, (uint64_t)67030930303149U, (uint64_t)769593945208213U,
-    (uint64_t)2048873385103577U, (uint64_t)455635274123107U, (uint64_t)2077404927176696U,
-    (uint64_t)1803539634652306U, (uint64_t)1837579953843417U, (uint64_t)1564240068662828U,
-    (uint64_t)1964310918970435U, (uint64_t)832822906252492U, (uint64_t)1516044634195010U,
-    (uint64_t)770571447506889U, (uint64_t)602215152486818U, (uint64_t)1760828333136947U,
-    (uint64_t)730156776030376U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, /* NOTE(review): generated precomputation table (320 = 16 entries x 20 limbs, matching the w4 name) -- the leading 20 limbs look like the identity point (X=0, Y=1, Z=1, T=0 in 5-limb form; confirm against the generator). Do not hand-edit values. */
+    0ULL, 0ULL, 0ULL, 0ULL, 557549315715710ULL, 196756086293855ULL, 846062225082495ULL,
+    1865068224838092ULL, 991112090754908ULL, 522916421512828ULL, 2098523346722375ULL,
+    1135633221747012ULL, 858420432114866ULL, 186358544306082ULL, 1044420411868480ULL,
+    2080052304349321ULL, 557301814716724ULL, 1305130257814057ULL, 2126012765451197ULL,
+    1441004402875101ULL, 353948968859203ULL, 470765987164835ULL, 1507675957683570ULL,
+    1086650358745097ULL, 1911913434398388ULL, 66086091117182ULL, 1137511952425971ULL,
+    36958263512141ULL, 2193310025325256ULL, 1085191426269045ULL, 1232148267909446ULL,
+    1449894406170117ULL, 1241416717139557ULL, 1940876999212868ULL, 829758415918121ULL,
+    309608450373449ULL, 2228398547683851ULL, 1580623271960188ULL, 1675601502456740ULL,
+    1360363115493548ULL, 1098397313096815ULL, 1809255384359797ULL, 1458261916834384ULL,
+    210682545649705ULL, 1606836641068115ULL, 1230478270405318ULL, 1843192771547802ULL,
+    1794596343564051ULL, 229060710252162ULL, 2169742775467181ULL, 701467067318072ULL,
+    696018499035555ULL, 521051885339807ULL, 158329567901874ULL, 740426481832143ULL,
+    1369811177301441ULL, 503351589084015ULL, 1781114827942261ULL, 1650493549693035ULL,
+    2174562418345156ULL, 456517194809244ULL, 2052761522121179ULL, 2233342271123682ULL,
+    1445872925177435ULL, 1131882576902813ULL, 220765848055241ULL, 1280259961403769ULL,
+    1581497080160712ULL, 1477441080108824ULL, 218428165202767ULL, 1970598141278907ULL,
+    643366736173069ULL, 2167909426804014ULL, 834993711408259ULL, 1922437166463212ULL,
+    1900036281472252ULL, 513794844386304ULL, 1297904164900114ULL, 1147626295373268ULL,
+    1910101606251299ULL, 182933838633381ULL, 806229530787362ULL, 155511666433200ULL,
+    290522463375462ULL, 534373523491751ULL, 1302938814480515ULL, 1664979184120445ULL,
+    304235649499423ULL, 339284524318609ULL, 1881717946973483ULL, 1670802286833842ULL,
+    2223637120675737ULL, 135818919485814ULL, 1144856572842792ULL, 2234981613434386ULL,
+    963917024969826ULL, 402275378284993ULL, 141532417412170ULL, 921537468739387ULL,
+    963905069722607ULL, 1405442890733358ULL, 1567763927164655ULL, 1664776329195930ULL,
+    2095924165508507ULL, 994243110271379ULL, 1243925610609353ULL, 1029845815569727ULL,
+    1001968867985629ULL, 170368934002484ULL, 1100906131583801ULL, 1825190326449569ULL,
+    1462285121182096ULL, 1545240767016377ULL, 797859025652273ULL, 1062758326657530ULL,
+    1125600735118266ULL, 739325756774527ULL, 1420144485966996ULL, 1915492743426702ULL,
+    752968196344993ULL, 882156396938351ULL, 1909097048763227ULL, 849058590685611ULL,
+    840754951388500ULL, 1832926948808323ULL, 2023317100075297ULL, 322382745442827ULL,
+    1569741341737601ULL, 1678986113194987ULL, 757598994581938ULL, 29678659580705ULL,
+    1239680935977986ULL, 1509239427168474ULL, 1055981929287006ULL, 1894085471158693ULL,
+    916486225488490ULL, 642168890366120ULL, 300453362620010ULL, 1858797242721481ULL,
+    2077989823177130ULL, 510228455273334ULL, 1473284798689270ULL, 5173934574301ULL,
+    765285232030050ULL, 1007154707631065ULL, 1862128712885972ULL, 168873464821340ULL,
+    1967853269759318ULL, 1489896018263031ULL, 592451806166369ULL, 1242298565603883ULL,
+    1838918921339058ULL, 697532763910695ULL, 294335466239059ULL, 135687058387449ULL,
+    2133734403874176ULL, 2121911143127699ULL, 20222476737364ULL, 1200824626476747ULL,
+    1397731736540791ULL, 702378430231418ULL, 59059527640068ULL, 460992547183981ULL,
+    1016125857842765ULL, 1273530839608957ULL, 96724128829301ULL, 1313433042425233ULL,
+    3543822857227ULL, 761975685357118ULL, 110417360745248ULL, 1079634164577663ULL,
+    2044574510020457ULL, 338709058603120ULL, 94541336042799ULL, 127963233585039ULL,
+    94427896272258ULL, 1143501979342182ULL, 1217958006212230ULL, 2153887831492134ULL,
+    1519219513255575ULL, 251793195454181ULL, 392517349345200ULL, 1507033011868881ULL,
+    2208494254670752ULL, 1364389582694359ULL, 2214069430728063ULL, 1272814257105752ULL,
+    741450148906352ULL, 1105776675555685ULL, 824447222014984ULL, 528745219306376ULL,
+    589427609121575ULL, 1501786838809155ULL, 379067373073147ULL, 184909476589356ULL,
+    1346887560616185ULL, 1932023742314082ULL, 1633302311869264ULL, 1685314821133069ULL,
+    1836610282047884ULL, 1595571594397150ULL, 615441688872198ULL, 1926435616702564ULL,
+    235632180396480ULL, 1051918343571810ULL, 2150570051687050ULL, 879198845408738ULL,
+    1443966275205464ULL, 481362545245088ULL, 512807443532642ULL, 641147578283480ULL,
+    1594276116945596ULL, 1844812743300602ULL, 2044559316019485ULL, 202620777969020ULL,
+    852992984136302ULL, 1500869642692910ULL, 1085216217052457ULL, 1736294372259758ULL,
+    2009666354486552ULL, 1262389020715248ULL, 1166527705256867ULL, 1409917450806036ULL,
+    1705819160057637ULL, 1116901782584378ULL, 1278460472285473ULL, 257879811360157ULL,
+    40314007176886ULL, 701309846749639ULL, 1380457676672777ULL, 631519782380272ULL,
+    1196339573466793ULL, 955537708940017ULL, 532725633381530ULL, 641190593731833ULL,
+    7214357153807ULL, 481922072107983ULL, 1634886189207352ULL, 1247659758261633ULL,
+    1655809614786430ULL, 43105797900223ULL, 76205809912607ULL, 1936575107455823ULL,
+    1107927314642236ULL, 2199986333469333ULL, 802974829322510ULL, 718173128143482ULL,
+    539385184235615ULL, 2075693785611221ULL, 953281147333690ULL, 1623571637172587ULL,
+    655274535022250ULL, 1568078078819021ULL, 101142125049712ULL, 1488441673350881ULL,
+    1457969561944515ULL, 1492622544287712ULL, 2041460689280803ULL, 1961848091392887ULL,
+    461003520846938ULL, 934728060399807ULL, 117723291519705ULL, 1027773762863526ULL,
+    56765304991567ULL, 2184028379550479ULL, 1768767711894030ULL, 1304432068983172ULL,
+    498080974452325ULL, 2134905654858163ULL, 1446137427202647ULL, 551613831549590ULL,
+    680288767054205ULL, 1278113339140386ULL, 378149431842614ULL, 80520494426960ULL,
+    2080985256348782ULL, 673432591799820ULL, 739189463724560ULL, 1847191452197509ULL,
+    527737312871602ULL, 477609358840073ULL, 1891633072677946ULL, 1841456828278466ULL,
+    2242502936489002ULL, 524791829362709ULL, 276648168514036ULL, 991706903257619ULL,
+    512580228297906ULL, 1216855104975946ULL, 67030930303149ULL, 769593945208213ULL,
+    2048873385103577ULL, 455635274123107ULL, 2077404927176696ULL, 1803539634652306ULL,
+    1837579953843417ULL, 1564240068662828ULL, 1964310918970435ULL, 832822906252492ULL,
+    1516044634195010ULL, 770571447506889ULL, 602215152486818ULL, 1760828333136947ULL,
+    730156776030376ULL
  };
 
static const
uint64_t
Hacl_Ed25519_PrecompTable_precomp_g_pow2_192_table_w4[320U] =
  {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)1129953239743101U, (uint64_t)1240339163956160U,
-    (uint64_t)61002583352401U, (uint64_t)2017604552196030U, (uint64_t)1576867829229863U,
-    (uint64_t)1508654942849389U, (uint64_t)270111619664077U, (uint64_t)1253097517254054U,
-    (uint64_t)721798270973250U, (uint64_t)161923365415298U, (uint64_t)828530877526011U,
-    (uint64_t)1494851059386763U, (uint64_t)662034171193976U, (uint64_t)1315349646974670U,
-    (uint64_t)2199229517308806U, (uint64_t)497078277852673U, (uint64_t)1310507715989956U,
-    (uint64_t)1881315714002105U, (uint64_t)2214039404983803U, (uint64_t)1331036420272667U,
-    (uint64_t)296286697520787U, (uint64_t)1179367922639127U, (uint64_t)25348441419697U,
-    (uint64_t)2200984961703188U, (uint64_t)150893128908291U, (uint64_t)1978614888570852U,
-    (uint64_t)1539657347172046U, (uint64_t)553810196523619U, (uint64_t)246017573977646U,
-    (uint64_t)1440448985385485U, (uint64_t)346049108099981U, (uint64_t)601166606218546U,
-    (uint64_t)855822004151713U, (uint64_t)1957521326383188U, (uint64_t)1114240380430887U,
-    (uint64_t)1349639675122048U, (uint64_t)957375954499040U, (uint64_t)111551795360136U,
-    (uint64_t)618586733648988U, (uint64_t)490708840688866U, (uint64_t)1267002049697314U,
-    (uint64_t)1130723224930028U, (uint64_t)215603029480828U, (uint64_t)1277138555414710U,
-    (uint64_t)1556750324971322U, (uint64_t)1407903521793741U, (uint64_t)1836836546590749U,
-    (uint64_t)576500297444199U, (uint64_t)2074707599091135U, (uint64_t)1826239864380012U,
-    (uint64_t)1935365705983312U, (uint64_t)239501825683682U, (uint64_t)1594236669034980U,
-    (uint64_t)1283078975055301U, (uint64_t)856745636255925U, (uint64_t)1342128647959981U,
-    (uint64_t)945216428379689U, (uint64_t)938746202496410U, (uint64_t)105775123333919U,
-    (uint64_t)1379852610117266U, (uint64_t)1770216827500275U, (uint64_t)1016017267535704U,
-    (uint64_t)1902885522469532U, (uint64_t)994184703730489U, (uint64_t)2227487538793763U,
-    (uint64_t)53155967096055U, (uint64_t)1264120808114350U, (uint64_t)1334928769376729U,
-    (uint64_t)393911808079997U, (uint64_t)826229239481845U, (uint64_t)1827903006733192U,
-    (uint64_t)1449283706008465U, (uint64_t)1258040415217849U, (uint64_t)1641484112868370U,
-    (uint64_t)1140150841968176U, (uint64_t)391113338021313U, (uint64_t)162138667815833U,
-    (uint64_t)742204396566060U, (uint64_t)110709233440557U, (uint64_t)90179377432917U,
-    (uint64_t)530511949644489U, (uint64_t)911568635552279U, (uint64_t)135869304780166U,
-    (uint64_t)617719999563692U, (uint64_t)1802525001631319U, (uint64_t)1836394639510490U,
-    (uint64_t)1862739456475085U, (uint64_t)1378284444664288U, (uint64_t)1617882529391756U,
-    (uint64_t)876124429891172U, (uint64_t)1147654641445091U, (uint64_t)1476943370400542U,
-    (uint64_t)688601222759067U, (uint64_t)2120281968990205U, (uint64_t)1387113236912611U,
-    (uint64_t)2125245820685788U, (uint64_t)1030674016350092U, (uint64_t)1594684598654247U,
-    (uint64_t)1165939511879820U, (uint64_t)271499323244173U, (uint64_t)546587254515484U,
-    (uint64_t)945603425742936U, (uint64_t)1242252568170226U, (uint64_t)561598728058142U,
-    (uint64_t)604827091794712U, (uint64_t)19869753585186U, (uint64_t)565367744708915U,
-    (uint64_t)536755754533603U, (uint64_t)1767258313589487U, (uint64_t)907952975936127U,
-    (uint64_t)292851652613937U, (uint64_t)163573546237963U, (uint64_t)837601408384564U,
-    (uint64_t)591996990118301U, (uint64_t)2126051747693057U, (uint64_t)182247548824566U,
-    (uint64_t)908369044122868U, (uint64_t)1335442699947273U, (uint64_t)2234292296528612U,
-    (uint64_t)689537529333034U, (uint64_t)2174778663790714U, (uint64_t)1011407643592667U,
-    (uint64_t)1856130618715473U, (uint64_t)1557437221651741U, (uint64_t)2250285407006102U,
-    (uint64_t)1412384213410827U, (uint64_t)1428042038612456U, (uint64_t)962709733973660U,
-    (uint64_t)313995703125919U, (uint64_t)1844969155869325U, (uint64_t)787716782673657U,
-    (uint64_t)622504542173478U, (uint64_t)930119043384654U, (uint64_t)2128870043952488U,
-    (uint64_t)537781531479523U, (uint64_t)1556666269904940U, (uint64_t)417333635741346U,
-    (uint64_t)1986743846438415U, (uint64_t)877620478041197U, (uint64_t)2205624582983829U,
-    (uint64_t)595260668884488U, (uint64_t)2025159350373157U, (uint64_t)2091659716088235U,
-    (uint64_t)1423634716596391U, (uint64_t)653686638634080U, (uint64_t)1972388399989956U,
-    (uint64_t)795575741798014U, (uint64_t)889240107997846U, (uint64_t)1446156876910732U,
-    (uint64_t)1028507012221776U, (uint64_t)1071697574586478U, (uint64_t)1689630411899691U,
-    (uint64_t)604092816502174U, (uint64_t)1909917373896122U, (uint64_t)1602544877643837U,
-    (uint64_t)1227177032923867U, (uint64_t)62684197535630U, (uint64_t)186146290753883U,
-    (uint64_t)414449055316766U, (uint64_t)1560555880866750U, (uint64_t)157579947096755U,
-    (uint64_t)230526795502384U, (uint64_t)1197673369665894U, (uint64_t)593779215869037U,
-    (uint64_t)214638834474097U, (uint64_t)1796344443484478U, (uint64_t)493550548257317U,
-    (uint64_t)1628442824033694U, (uint64_t)1410811655893495U, (uint64_t)1009361960995171U,
-    (uint64_t)604736219740352U, (uint64_t)392445928555351U, (uint64_t)1254295770295706U,
-    (uint64_t)1958074535046128U, (uint64_t)508699942241019U, (uint64_t)739405911261325U,
-    (uint64_t)1678760393882409U, (uint64_t)517763708545996U, (uint64_t)640040257898722U,
-    (uint64_t)384966810872913U, (uint64_t)407454748380128U, (uint64_t)152604679407451U,
-    (uint64_t)185102854927662U, (uint64_t)1448175503649595U, (uint64_t)100328519208674U,
-    (uint64_t)1153263667012830U, (uint64_t)1643926437586490U, (uint64_t)609632142834154U,
-    (uint64_t)980984004749261U, (uint64_t)855290732258779U, (uint64_t)2186022163021506U,
-    (uint64_t)1254052618626070U, (uint64_t)1850030517182611U, (uint64_t)162348933090207U,
-    (uint64_t)1948712273679932U, (uint64_t)1331832516262191U, (uint64_t)1219400369175863U,
-    (uint64_t)89689036937483U, (uint64_t)1554886057235815U, (uint64_t)1520047528432789U,
-    (uint64_t)81263957652811U, (uint64_t)146612464257008U, (uint64_t)2207945627164163U,
-    (uint64_t)919846660682546U, (uint64_t)1925694087906686U, (uint64_t)2102027292388012U,
-    (uint64_t)887992003198635U, (uint64_t)1817924871537027U, (uint64_t)746660005584342U,
-    (uint64_t)753757153275525U, (uint64_t)91394270908699U, (uint64_t)511837226544151U,
-    (uint64_t)736341543649373U, (uint64_t)1256371121466367U, (uint64_t)1977778299551813U,
-    (uint64_t)817915174462263U, (uint64_t)1602323381418035U, (uint64_t)190035164572930U,
-    (uint64_t)603796401391181U, (uint64_t)2152666873671669U, (uint64_t)1813900316324112U,
-    (uint64_t)1292622433358041U, (uint64_t)888439870199892U, (uint64_t)978918155071994U,
-    (uint64_t)534184417909805U, (uint64_t)466460084317313U, (uint64_t)1275223140288685U,
-    (uint64_t)786407043883517U, (uint64_t)1620520623925754U, (uint64_t)1753625021290269U,
-    (uint64_t)751937175104525U, (uint64_t)905301961820613U, (uint64_t)697059847245437U,
-    (uint64_t)584919033981144U, (uint64_t)1272165506533156U, (uint64_t)1532180021450866U,
-    (uint64_t)1901407354005301U, (uint64_t)1421319720492586U, (uint64_t)2179081609765456U,
-    (uint64_t)2193253156667632U, (uint64_t)1080248329608584U, (uint64_t)2158422436462066U,
-    (uint64_t)759167597017850U, (uint64_t)545759071151285U, (uint64_t)641600428493698U,
-    (uint64_t)943791424499848U, (uint64_t)469571542427864U, (uint64_t)951117845222467U,
-    (uint64_t)1780538594373407U, (uint64_t)614611122040309U, (uint64_t)1354826131886963U,
-    (uint64_t)221898131992340U, (uint64_t)1145699723916219U, (uint64_t)798735379961769U,
-    (uint64_t)1843560518208287U, (uint64_t)1424523160161545U, (uint64_t)205549016574779U,
-    (uint64_t)2239491587362749U, (uint64_t)1918363582399888U, (uint64_t)1292183072788455U,
-    (uint64_t)1783513123192567U, (uint64_t)1584027954317205U, (uint64_t)1890421443925740U,
-    (uint64_t)1718459319874929U, (uint64_t)1522091040748809U, (uint64_t)399467600667219U,
-    (uint64_t)1870973059066576U, (uint64_t)287514433150348U, (uint64_t)1397845311152885U,
-    (uint64_t)1880440629872863U, (uint64_t)709302939340341U, (uint64_t)1813571361109209U,
-    (uint64_t)86598795876860U, (uint64_t)1146964554310612U, (uint64_t)1590956584862432U,
-    (uint64_t)2097004628155559U, (uint64_t)656227622102390U, (uint64_t)1808500445541891U,
-    (uint64_t)958336726523135U, (uint64_t)2007604569465975U, (uint64_t)313504950390997U,
-    (uint64_t)1399686004953620U, (uint64_t)1759732788465234U, (uint64_t)1562539721055836U,
-    (uint64_t)1575722765016293U, (uint64_t)793318366641259U, (uint64_t)443876859384887U,
-    (uint64_t)547308921989704U, (uint64_t)636698687503328U, (uint64_t)2179175835287340U,
-    (uint64_t)498333551718258U, (uint64_t)932248760026176U, (uint64_t)1612395686304653U,
-    (uint64_t)2179774103745626U, (uint64_t)1359658123541018U, (uint64_t)171488501802442U,
-    (uint64_t)1625034951791350U, (uint64_t)520196922773633U, (uint64_t)1873787546341877U,
-    (uint64_t)303457823885368U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, /* NOTE(review): generated precomputation table (320 = 16 entries x 20 limbs, matching the w4 name) -- same layout as the g_pow2_128 table above; leading 20 limbs look like the identity point (confirm against the generator). Do not hand-edit values. */
+    0ULL, 0ULL, 0ULL, 0ULL, 1129953239743101ULL, 1240339163956160ULL, 61002583352401ULL,
+    2017604552196030ULL, 1576867829229863ULL, 1508654942849389ULL, 270111619664077ULL,
+    1253097517254054ULL, 721798270973250ULL, 161923365415298ULL, 828530877526011ULL,
+    1494851059386763ULL, 662034171193976ULL, 1315349646974670ULL, 2199229517308806ULL,
+    497078277852673ULL, 1310507715989956ULL, 1881315714002105ULL, 2214039404983803ULL,
+    1331036420272667ULL, 296286697520787ULL, 1179367922639127ULL, 25348441419697ULL,
+    2200984961703188ULL, 150893128908291ULL, 1978614888570852ULL, 1539657347172046ULL,
+    553810196523619ULL, 246017573977646ULL, 1440448985385485ULL, 346049108099981ULL,
+    601166606218546ULL, 855822004151713ULL, 1957521326383188ULL, 1114240380430887ULL,
+    1349639675122048ULL, 957375954499040ULL, 111551795360136ULL, 618586733648988ULL,
+    490708840688866ULL, 1267002049697314ULL, 1130723224930028ULL, 215603029480828ULL,
+    1277138555414710ULL, 1556750324971322ULL, 1407903521793741ULL, 1836836546590749ULL,
+    576500297444199ULL, 2074707599091135ULL, 1826239864380012ULL, 1935365705983312ULL,
+    239501825683682ULL, 1594236669034980ULL, 1283078975055301ULL, 856745636255925ULL,
+    1342128647959981ULL, 945216428379689ULL, 938746202496410ULL, 105775123333919ULL,
+    1379852610117266ULL, 1770216827500275ULL, 1016017267535704ULL, 1902885522469532ULL,
+    994184703730489ULL, 2227487538793763ULL, 53155967096055ULL, 1264120808114350ULL,
+    1334928769376729ULL, 393911808079997ULL, 826229239481845ULL, 1827903006733192ULL,
+    1449283706008465ULL, 1258040415217849ULL, 1641484112868370ULL, 1140150841968176ULL,
+    391113338021313ULL, 162138667815833ULL, 742204396566060ULL, 110709233440557ULL,
+    90179377432917ULL, 530511949644489ULL, 911568635552279ULL, 135869304780166ULL,
+    617719999563692ULL, 1802525001631319ULL, 1836394639510490ULL, 1862739456475085ULL,
+    1378284444664288ULL, 1617882529391756ULL, 876124429891172ULL, 1147654641445091ULL,
+    1476943370400542ULL, 688601222759067ULL, 2120281968990205ULL, 1387113236912611ULL,
+    2125245820685788ULL, 1030674016350092ULL, 1594684598654247ULL, 1165939511879820ULL,
+    271499323244173ULL, 546587254515484ULL, 945603425742936ULL, 1242252568170226ULL,
+    561598728058142ULL, 604827091794712ULL, 19869753585186ULL, 565367744708915ULL,
+    536755754533603ULL, 1767258313589487ULL, 907952975936127ULL, 292851652613937ULL,
+    163573546237963ULL, 837601408384564ULL, 591996990118301ULL, 2126051747693057ULL,
+    182247548824566ULL, 908369044122868ULL, 1335442699947273ULL, 2234292296528612ULL,
+    689537529333034ULL, 2174778663790714ULL, 1011407643592667ULL, 1856130618715473ULL,
+    1557437221651741ULL, 2250285407006102ULL, 1412384213410827ULL, 1428042038612456ULL,
+    962709733973660ULL, 313995703125919ULL, 1844969155869325ULL, 787716782673657ULL,
+    622504542173478ULL, 930119043384654ULL, 2128870043952488ULL, 537781531479523ULL,
+    1556666269904940ULL, 417333635741346ULL, 1986743846438415ULL, 877620478041197ULL,
+    2205624582983829ULL, 595260668884488ULL, 2025159350373157ULL, 2091659716088235ULL,
+    1423634716596391ULL, 653686638634080ULL, 1972388399989956ULL, 795575741798014ULL,
+    889240107997846ULL, 1446156876910732ULL, 1028507012221776ULL, 1071697574586478ULL,
+    1689630411899691ULL, 604092816502174ULL, 1909917373896122ULL, 1602544877643837ULL,
+    1227177032923867ULL, 62684197535630ULL, 186146290753883ULL, 414449055316766ULL,
+    1560555880866750ULL, 157579947096755ULL, 230526795502384ULL, 1197673369665894ULL,
+    593779215869037ULL, 214638834474097ULL, 1796344443484478ULL, 493550548257317ULL,
+    1628442824033694ULL, 1410811655893495ULL, 1009361960995171ULL, 604736219740352ULL,
+    392445928555351ULL, 1254295770295706ULL, 1958074535046128ULL, 508699942241019ULL,
+    739405911261325ULL, 1678760393882409ULL, 517763708545996ULL, 640040257898722ULL,
+    384966810872913ULL, 407454748380128ULL, 152604679407451ULL, 185102854927662ULL,
+    1448175503649595ULL, 100328519208674ULL, 1153263667012830ULL, 1643926437586490ULL,
+    609632142834154ULL, 980984004749261ULL, 855290732258779ULL, 2186022163021506ULL,
+    1254052618626070ULL, 1850030517182611ULL, 162348933090207ULL, 1948712273679932ULL,
+    1331832516262191ULL, 1219400369175863ULL, 89689036937483ULL, 1554886057235815ULL,
+    1520047528432789ULL, 81263957652811ULL, 146612464257008ULL, 2207945627164163ULL,
+    919846660682546ULL, 1925694087906686ULL, 2102027292388012ULL, 887992003198635ULL,
+    1817924871537027ULL, 746660005584342ULL, 753757153275525ULL, 91394270908699ULL,
+    511837226544151ULL, 736341543649373ULL, 1256371121466367ULL, 1977778299551813ULL,
+    817915174462263ULL, 1602323381418035ULL, 190035164572930ULL, 603796401391181ULL,
+    2152666873671669ULL, 1813900316324112ULL, 1292622433358041ULL, 888439870199892ULL,
+    978918155071994ULL, 534184417909805ULL, 466460084317313ULL, 1275223140288685ULL,
+    786407043883517ULL, 1620520623925754ULL, 1753625021290269ULL, 751937175104525ULL,
+    905301961820613ULL, 697059847245437ULL, 584919033981144ULL, 1272165506533156ULL,
+    1532180021450866ULL, 1901407354005301ULL, 1421319720492586ULL, 2179081609765456ULL,
+    2193253156667632ULL, 1080248329608584ULL, 2158422436462066ULL, 759167597017850ULL,
+    545759071151285ULL, 641600428493698ULL, 943791424499848ULL, 469571542427864ULL,
+    951117845222467ULL, 1780538594373407ULL, 614611122040309ULL, 1354826131886963ULL,
+    221898131992340ULL, 1145699723916219ULL, 798735379961769ULL, 1843560518208287ULL,
+    1424523160161545ULL, 205549016574779ULL, 2239491587362749ULL, 1918363582399888ULL,
+    1292183072788455ULL, 1783513123192567ULL, 1584027954317205ULL, 1890421443925740ULL,
+    1718459319874929ULL, 1522091040748809ULL, 399467600667219ULL, 1870973059066576ULL,
+    287514433150348ULL, 1397845311152885ULL, 1880440629872863ULL, 709302939340341ULL,
+    1813571361109209ULL, 86598795876860ULL, 1146964554310612ULL, 1590956584862432ULL,
+    2097004628155559ULL, 656227622102390ULL, 1808500445541891ULL, 958336726523135ULL,
+    2007604569465975ULL, 313504950390997ULL, 1399686004953620ULL, 1759732788465234ULL,
+    1562539721055836ULL, 1575722765016293ULL, 793318366641259ULL, 443876859384887ULL,
+    547308921989704ULL, 636698687503328ULL, 2179175835287340ULL, 498333551718258ULL,
+    932248760026176ULL, 1612395686304653ULL, 2179774103745626ULL, 1359658123541018ULL,
+    171488501802442ULL, 1625034951791350ULL, 520196922773633ULL, 1873787546341877ULL,
+    303457823885368ULL
  };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5[640U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)1738742601995546U, (uint64_t)1146398526822698U,
-    (uint64_t)2070867633025821U, (uint64_t)562264141797630U, (uint64_t)587772402128613U,
-    (uint64_t)1801439850948184U, (uint64_t)1351079888211148U, (uint64_t)450359962737049U,
-    (uint64_t)900719925474099U, (uint64_t)1801439850948198U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1841354044333475U,
-    (uint64_t)16398895984059U, (uint64_t)755974180946558U, (uint64_t)900171276175154U,
-    (uint64_t)1821297809914039U, (uint64_t)1661154287933054U, (uint64_t)284530020860578U,
-    (uint64_t)1390261174866914U, (uint64_t)1524110943907984U, (uint64_t)1045603498418422U,
-    (uint64_t)928651508580478U, (uint64_t)1383326941296346U, (uint64_t)961937908925785U,
-    (uint64_t)80455759693706U, (uint64_t)904734540352947U, (uint64_t)1507481815385608U,
-    (uint64_t)2223447444246085U, (uint64_t)1083941587175919U, (uint64_t)2059929906842505U,
-    (uint64_t)1581435440146976U, (uint64_t)782730187692425U, (uint64_t)9928394897574U,
-    (uint64_t)1539449519985236U, (uint64_t)1923587931078510U, (uint64_t)552919286076056U,
-    (uint64_t)376925408065760U, (uint64_t)447320488831784U, (uint64_t)1362918338468019U,
-    (uint64_t)1470031896696846U, (uint64_t)2189796996539902U, (uint64_t)1337552949959847U,
-    (uint64_t)1762287177775726U, (uint64_t)237994495816815U, (uint64_t)1277840395970544U,
-    (uint64_t)543972849007241U, (uint64_t)1224692671618814U, (uint64_t)162359533289271U,
-    (uint64_t)282240927125249U, (uint64_t)586909166382289U, (uint64_t)17726488197838U,
-    (uint64_t)377014554985659U, (uint64_t)1433835303052512U, (uint64_t)702061469493692U,
-    (uint64_t)1142253108318154U, (uint64_t)318297794307551U, (uint64_t)954362646308543U,
-    (uint64_t)517363881452320U, (uint64_t)1868013482130416U, (uint64_t)262562472373260U,
-    (uint64_t)902232853249919U, (uint64_t)2107343057055746U, (uint64_t)462368348619024U,
-    (uint64_t)1893758677092974U, (uint64_t)2177729767846389U, (uint64_t)2168532543559143U,
-    (uint64_t)443867094639821U, (uint64_t)730169342581022U, (uint64_t)1564589016879755U,
-    (uint64_t)51218195700649U, (uint64_t)76684578423745U, (uint64_t)560266272480743U,
-    (uint64_t)922517457707697U, (uint64_t)2066645939860874U, (uint64_t)1318277348414638U,
-    (uint64_t)1576726809084003U, (uint64_t)1817337608563665U, (uint64_t)1874240939237666U,
-    (uint64_t)754733726333910U, (uint64_t)97085310406474U, (uint64_t)751148364309235U,
-    (uint64_t)1622159695715187U, (uint64_t)1444098819684916U, (uint64_t)130920805558089U,
-    (uint64_t)1260449179085308U, (uint64_t)1860021740768461U, (uint64_t)110052860348509U,
-    (uint64_t)193830891643810U, (uint64_t)164148413933881U, (uint64_t)180017794795332U,
-    (uint64_t)1523506525254651U, (uint64_t)465981629225956U, (uint64_t)559733514964572U,
-    (uint64_t)1279624874416974U, (uint64_t)2026642326892306U, (uint64_t)1425156829982409U,
-    (uint64_t)2160936383793147U, (uint64_t)1061870624975247U, (uint64_t)2023497043036941U,
-    (uint64_t)117942212883190U, (uint64_t)490339622800774U, (uint64_t)1729931303146295U,
-    (uint64_t)422305932971074U, (uint64_t)529103152793096U, (uint64_t)1211973233775992U,
-    (uint64_t)721364955929681U, (uint64_t)1497674430438813U, (uint64_t)342545521275073U,
-    (uint64_t)2102107575279372U, (uint64_t)2108462244669966U, (uint64_t)1382582406064082U,
-    (uint64_t)2206396818383323U, (uint64_t)2109093268641147U, (uint64_t)10809845110983U,
-    (uint64_t)1605176920880099U, (uint64_t)744640650753946U, (uint64_t)1712758897518129U,
-    (uint64_t)373410811281809U, (uint64_t)648838265800209U, (uint64_t)813058095530999U,
-    (uint64_t)513987632620169U, (uint64_t)465516160703329U, (uint64_t)2136322186126330U,
-    (uint64_t)1979645899422932U, (uint64_t)1197131006470786U, (uint64_t)1467836664863979U,
-    (uint64_t)1340751381374628U, (uint64_t)1810066212667962U, (uint64_t)1009933588225499U,
-    (uint64_t)1106129188080873U, (uint64_t)1388980405213901U, (uint64_t)533719246598044U,
-    (uint64_t)1169435803073277U, (uint64_t)198920999285821U, (uint64_t)487492330629854U,
-    (uint64_t)1807093008537778U, (uint64_t)1540899012923865U, (uint64_t)2075080271659867U,
-    (uint64_t)1527990806921523U, (uint64_t)1323728742908002U, (uint64_t)1568595959608205U,
-    (uint64_t)1388032187497212U, (uint64_t)2026968840050568U, (uint64_t)1396591153295755U,
-    (uint64_t)820416950170901U, (uint64_t)520060313205582U, (uint64_t)2016404325094901U,
-    (uint64_t)1584709677868520U, (uint64_t)272161374469956U, (uint64_t)1567188603996816U,
-    (uint64_t)1986160530078221U, (uint64_t)553930264324589U, (uint64_t)1058426729027503U,
-    (uint64_t)8762762886675U, (uint64_t)2216098143382988U, (uint64_t)1835145266889223U,
-    (uint64_t)1712936431558441U, (uint64_t)1017009937844974U, (uint64_t)585361667812740U,
-    (uint64_t)2114711541628181U, (uint64_t)2238729632971439U, (uint64_t)121257546253072U,
-    (uint64_t)847154149018345U, (uint64_t)211972965476684U, (uint64_t)287499084460129U,
-    (uint64_t)2098247259180197U, (uint64_t)839070411583329U, (uint64_t)339551619574372U,
-    (uint64_t)1432951287640743U, (uint64_t)526481249498942U, (uint64_t)931991661905195U,
-    (uint64_t)1884279965674487U, (uint64_t)200486405604411U, (uint64_t)364173020594788U,
-    (uint64_t)518034455936955U, (uint64_t)1085564703965501U, (uint64_t)16030410467927U,
-    (uint64_t)604865933167613U, (uint64_t)1695298441093964U, (uint64_t)498856548116159U,
-    (uint64_t)2193030062787034U, (uint64_t)1706339802964179U, (uint64_t)1721199073493888U,
-    (uint64_t)820740951039755U, (uint64_t)1216053436896834U, (uint64_t)23954895815139U,
-    (uint64_t)1662515208920491U, (uint64_t)1705443427511899U, (uint64_t)1957928899570365U,
-    (uint64_t)1189636258255725U, (uint64_t)1795695471103809U, (uint64_t)1691191297654118U,
-    (uint64_t)282402585374360U, (uint64_t)460405330264832U, (uint64_t)63765529445733U,
-    (uint64_t)469763447404473U, (uint64_t)733607089694996U, (uint64_t)685410420186959U,
-    (uint64_t)1096682630419738U, (uint64_t)1162548510542362U, (uint64_t)1020949526456676U,
-    (uint64_t)1211660396870573U, (uint64_t)613126398222696U, (uint64_t)1117829165843251U,
-    (uint64_t)742432540886650U, (uint64_t)1483755088010658U, (uint64_t)942392007134474U,
-    (uint64_t)1447834130944107U, (uint64_t)489368274863410U, (uint64_t)23192985544898U,
-    (uint64_t)648442406146160U, (uint64_t)785438843373876U, (uint64_t)249464684645238U,
-    (uint64_t)170494608205618U, (uint64_t)335112827260550U, (uint64_t)1462050123162735U,
-    (uint64_t)1084803668439016U, (uint64_t)853459233600325U, (uint64_t)215777728187495U,
-    (uint64_t)1965759433526974U, (uint64_t)1349482894446537U, (uint64_t)694163317612871U,
-    (uint64_t)860536766165036U, (uint64_t)1178788094084321U, (uint64_t)1652739626626996U,
-    (uint64_t)2115723946388185U, (uint64_t)1577204379094664U, (uint64_t)1083882859023240U,
-    (uint64_t)1768759143381635U, (uint64_t)1737180992507258U, (uint64_t)246054513922239U,
-    (uint64_t)577253134087234U, (uint64_t)356340280578042U, (uint64_t)1638917769925142U,
-    (uint64_t)223550348130103U, (uint64_t)470592666638765U, (uint64_t)22663573966996U,
-    (uint64_t)596552461152400U, (uint64_t)364143537069499U, (uint64_t)3942119457699U,
-    (uint64_t)107951982889287U, (uint64_t)1843471406713209U, (uint64_t)1625773041610986U,
-    (uint64_t)1466141092501702U, (uint64_t)1043024095021271U, (uint64_t)310429964047508U,
-    (uint64_t)98559121500372U, (uint64_t)152746933782868U, (uint64_t)259407205078261U,
-    (uint64_t)828123093322585U, (uint64_t)1576847274280091U, (uint64_t)1170871375757302U,
-    (uint64_t)1588856194642775U, (uint64_t)984767822341977U, (uint64_t)1141497997993760U,
-    (uint64_t)809325345150796U, (uint64_t)1879837728202511U, (uint64_t)201340910657893U,
-    (uint64_t)1079157558888483U, (uint64_t)1052373448588065U, (uint64_t)1732036202501778U,
-    (uint64_t)2105292670328445U, (uint64_t)679751387312402U, (uint64_t)1679682144926229U,
-    (uint64_t)1695823455818780U, (uint64_t)498852317075849U, (uint64_t)1786555067788433U,
-    (uint64_t)1670727545779425U, (uint64_t)117945875433544U, (uint64_t)407939139781844U,
-    (uint64_t)854632120023778U, (uint64_t)1413383148360437U, (uint64_t)286030901733673U,
-    (uint64_t)1207361858071196U, (uint64_t)461340408181417U, (uint64_t)1096919590360164U,
-    (uint64_t)1837594897475685U, (uint64_t)533755561544165U, (uint64_t)1638688042247712U,
-    (uint64_t)1431653684793005U, (uint64_t)1036458538873559U, (uint64_t)390822120341779U,
-    (uint64_t)1920929837111618U, (uint64_t)543426740024168U, (uint64_t)645751357799929U,
-    (uint64_t)2245025632994463U, (uint64_t)1550778638076452U, (uint64_t)223738153459949U,
-    (uint64_t)1337209385492033U, (uint64_t)1276967236456531U, (uint64_t)1463815821063071U,
-    (uint64_t)2070620870191473U, (uint64_t)1199170709413753U, (uint64_t)273230877394166U,
-    (uint64_t)1873264887608046U, (uint64_t)890877152910775U, (uint64_t)983226445635730U,
-    (uint64_t)44873798519521U, (uint64_t)697147127512130U, (uint64_t)961631038239304U,
-    (uint64_t)709966160696826U, (uint64_t)1706677689540366U, (uint64_t)502782733796035U,
-    (uint64_t)812545535346033U, (uint64_t)1693622521296452U, (uint64_t)1955813093002510U,
-    (uint64_t)1259937612881362U, (uint64_t)1873032503803559U, (uint64_t)1140330566016428U,
-    (uint64_t)1675726082440190U, (uint64_t)60029928909786U, (uint64_t)170335608866763U,
-    (uint64_t)766444312315022U, (uint64_t)2025049511434113U, (uint64_t)2200845622430647U,
-    (uint64_t)1201269851450408U, (uint64_t)590071752404907U, (uint64_t)1400995030286946U,
-    (uint64_t)2152637413853822U, (uint64_t)2108495473841983U, (uint64_t)3855406710349U,
-    (uint64_t)1726137673168580U, (uint64_t)51004317200100U, (uint64_t)1749082328586939U,
-    (uint64_t)1704088976144558U, (uint64_t)1977318954775118U, (uint64_t)2062602253162400U,
-    (uint64_t)948062503217479U, (uint64_t)361953965048030U, (uint64_t)1528264887238440U,
-    (uint64_t)62582552172290U, (uint64_t)2241602163389280U, (uint64_t)156385388121765U,
-    (uint64_t)2124100319761492U, (uint64_t)388928050571382U, (uint64_t)1556123596922727U,
-    (uint64_t)979310669812384U, (uint64_t)113043855206104U, (uint64_t)2023223924825469U,
-    (uint64_t)643651703263034U, (uint64_t)2234446903655540U, (uint64_t)1577241261424997U,
-    (uint64_t)860253174523845U, (uint64_t)1691026473082448U, (uint64_t)1091672764933872U,
-    (uint64_t)1957463109756365U, (uint64_t)530699502660193U, (uint64_t)349587141723569U,
-    (uint64_t)674661681919563U, (uint64_t)1633727303856240U, (uint64_t)708909037922144U,
-    (uint64_t)2160722508518119U, (uint64_t)1302188051602540U, (uint64_t)976114603845777U,
-    (uint64_t)120004758721939U, (uint64_t)1681630708873780U, (uint64_t)622274095069244U,
-    (uint64_t)1822346309016698U, (uint64_t)1100921177951904U, (uint64_t)2216952659181677U,
-    (uint64_t)1844020550362490U, (uint64_t)1976451368365774U, (uint64_t)1321101422068822U,
-    (uint64_t)1189859436282668U, (uint64_t)2008801879735257U, (uint64_t)2219413454333565U,
-    (uint64_t)424288774231098U, (uint64_t)359793146977912U, (uint64_t)270293357948703U,
-    (uint64_t)587226003677000U, (uint64_t)1482071926139945U, (uint64_t)1419630774650359U,
-    (uint64_t)1104739070570175U, (uint64_t)1662129023224130U, (uint64_t)1609203612533411U,
-    (uint64_t)1250932720691980U, (uint64_t)95215711818495U, (uint64_t)498746909028150U,
-    (uint64_t)158151296991874U, (uint64_t)1201379988527734U, (uint64_t)561599945143989U,
-    (uint64_t)2211577425617888U, (uint64_t)2166577612206324U, (uint64_t)1057590354233512U,
-    (uint64_t)1968123280416769U, (uint64_t)1316586165401313U, (uint64_t)762728164447634U,
-    (uint64_t)2045395244316047U, (uint64_t)1531796898725716U, (uint64_t)315385971670425U,
-    (uint64_t)1109421039396756U, (uint64_t)2183635256408562U, (uint64_t)1896751252659461U,
-    (uint64_t)840236037179080U, (uint64_t)796245792277211U, (uint64_t)508345890111193U,
-    (uint64_t)1275386465287222U, (uint64_t)513560822858784U, (uint64_t)1784735733120313U,
-    (uint64_t)1346467478899695U, (uint64_t)601125231208417U, (uint64_t)701076661112726U,
-    (uint64_t)1841998436455089U, (uint64_t)1156768600940434U, (uint64_t)1967853462343221U,
-    (uint64_t)2178318463061452U, (uint64_t)481885520752741U, (uint64_t)675262828640945U,
-    (uint64_t)1033539418596582U, (uint64_t)1743329872635846U, (uint64_t)159322641251283U,
-    (uint64_t)1573076470127113U, (uint64_t)954827619308195U, (uint64_t)778834750662635U,
-    (uint64_t)619912782122617U, (uint64_t)515681498488209U, (uint64_t)1675866144246843U,
-    (uint64_t)811716020969981U, (uint64_t)1125515272217398U, (uint64_t)1398917918287342U,
-    (uint64_t)1301680949183175U, (uint64_t)726474739583734U, (uint64_t)587246193475200U,
-    (uint64_t)1096581582611864U, (uint64_t)1469911826213486U, (uint64_t)1990099711206364U,
-    (uint64_t)1256496099816508U, (uint64_t)2019924615195672U, (uint64_t)1251232456707555U,
-    (uint64_t)2042971196009755U, (uint64_t)214061878479265U, (uint64_t)115385726395472U,
-    (uint64_t)1677875239524132U, (uint64_t)756888883383540U, (uint64_t)1153862117756233U,
-    (uint64_t)503391530851096U, (uint64_t)946070017477513U, (uint64_t)1878319040542579U,
-    (uint64_t)1101349418586920U, (uint64_t)793245696431613U, (uint64_t)397920495357645U,
-    (uint64_t)2174023872951112U, (uint64_t)1517867915189593U, (uint64_t)1829855041462995U,
-    (uint64_t)1046709983503619U, (uint64_t)424081940711857U, (uint64_t)2112438073094647U,
-    (uint64_t)1504338467349861U, (uint64_t)2244574127374532U, (uint64_t)2136937537441911U,
-    (uint64_t)1741150838990304U, (uint64_t)25894628400571U, (uint64_t)512213526781178U,
-    (uint64_t)1168384260796379U, (uint64_t)1424607682379833U, (uint64_t)938677789731564U,
-    (uint64_t)872882241891896U, (uint64_t)1713199397007700U, (uint64_t)1410496326218359U,
-    (uint64_t)854379752407031U, (uint64_t)465141611727634U, (uint64_t)315176937037857U,
-    (uint64_t)1020115054571233U, (uint64_t)1856290111077229U, (uint64_t)2028366269898204U,
-    (uint64_t)1432980880307543U, (uint64_t)469932710425448U, (uint64_t)581165267592247U,
-    (uint64_t)496399148156603U, (uint64_t)2063435226705903U, (uint64_t)2116841086237705U,
-    (uint64_t)498272567217048U, (uint64_t)1829438076967906U, (uint64_t)1573925801278491U,
-    (uint64_t)460763576329867U, (uint64_t)1705264723728225U, (uint64_t)999514866082412U,
-    (uint64_t)29635061779362U, (uint64_t)1884233592281020U, (uint64_t)1449755591461338U,
-    (uint64_t)42579292783222U, (uint64_t)1869504355369200U, (uint64_t)495506004805251U,
-    (uint64_t)264073104888427U, (uint64_t)2088880861028612U, (uint64_t)104646456386576U,
-    (uint64_t)1258445191399967U, (uint64_t)1348736801545799U, (uint64_t)2068276361286613U,
-    (uint64_t)884897216646374U, (uint64_t)922387476801376U, (uint64_t)1043886580402805U,
-    (uint64_t)1240883498470831U, (uint64_t)1601554651937110U, (uint64_t)804382935289482U,
-    (uint64_t)512379564477239U, (uint64_t)1466384519077032U, (uint64_t)1280698500238386U,
-    (uint64_t)211303836685749U, (uint64_t)2081725624793803U, (uint64_t)545247644516879U,
-    (uint64_t)215313359330384U, (uint64_t)286479751145614U, (uint64_t)2213650281751636U,
-    (uint64_t)2164927945999874U, (uint64_t)2072162991540882U, (uint64_t)1443769115444779U,
-    (uint64_t)1581473274363095U, (uint64_t)434633875922699U, (uint64_t)340456055781599U,
-    (uint64_t)373043091080189U, (uint64_t)839476566531776U, (uint64_t)1856706858509978U,
-    (uint64_t)931616224909153U, (uint64_t)1888181317414065U, (uint64_t)213654322650262U,
-    (uint64_t)1161078103416244U, (uint64_t)1822042328851513U, (uint64_t)915817709028812U,
-    (uint64_t)1828297056698188U, (uint64_t)1212017130909403U, (uint64_t)60258343247333U,
-    (uint64_t)342085800008230U, (uint64_t)930240559508270U, (uint64_t)1549884999174952U,
-    (uint64_t)809895264249462U, (uint64_t)184726257947682U, (uint64_t)1157065433504828U,
-    (uint64_t)1209999630381477U, (uint64_t)999920399374391U, (uint64_t)1714770150788163U,
-    (uint64_t)2026130985413228U, (uint64_t)506776632883140U, (uint64_t)1349042668246528U,
-    (uint64_t)1937232292976967U, (uint64_t)942302637530730U, (uint64_t)160211904766226U,
-    (uint64_t)1042724500438571U, (uint64_t)212454865139142U, (uint64_t)244104425172642U,
-    (uint64_t)1376990622387496U, (uint64_t)76126752421227U, (uint64_t)1027540886376422U,
-    (uint64_t)1912210655133026U, (uint64_t)13410411589575U, (uint64_t)1475856708587773U,
-    (uint64_t)615563352691682U, (uint64_t)1446629324872644U, (uint64_t)1683670301784014U,
-    (uint64_t)1049873327197127U, (uint64_t)1826401704084838U, (uint64_t)2032577048760775U,
-    (uint64_t)1922203607878853U, (uint64_t)836708788764806U, (uint64_t)2193084654695012U,
-    (uint64_t)1342923183256659U, (uint64_t)849356986294271U, (uint64_t)1228863973965618U,
-    (uint64_t)94886161081867U, (uint64_t)1423288430204892U, (uint64_t)2016167528707016U,
-    (uint64_t)1633187660972877U, (uint64_t)1550621242301752U, (uint64_t)340630244512994U,
-    (uint64_t)2103577710806901U, (uint64_t)221625016538931U, (uint64_t)421544147350960U,
-    (uint64_t)580428704555156U, (uint64_t)1479831381265617U, (uint64_t)518057926544698U,
-    (uint64_t)955027348790630U, (uint64_t)1326749172561598U, (uint64_t)1118304625755967U,
-    (uint64_t)1994005916095176U, (uint64_t)1799757332780663U, (uint64_t)751343129396941U,
-    (uint64_t)1468672898746144U, (uint64_t)1451689964451386U, (uint64_t)755070293921171U,
-    (uint64_t)904857405877052U, (uint64_t)1276087530766984U, (uint64_t)403986562858511U,
-    (uint64_t)1530661255035337U, (uint64_t)1644972908910502U, (uint64_t)1370170080438957U,
-    (uint64_t)139839536695744U, (uint64_t)909930462436512U, (uint64_t)1899999215356933U,
-    (uint64_t)635992381064566U, (uint64_t)788740975837654U, (uint64_t)224241231493695U,
-    (uint64_t)1267090030199302U, (uint64_t)998908061660139U, (uint64_t)1784537499699278U,
-    (uint64_t)859195370018706U, (uint64_t)1953966091439379U, (uint64_t)2189271820076010U,
-    (uint64_t)2039067059943978U, (uint64_t)1526694380855202U, (uint64_t)2040321513194941U,
-    (uint64_t)329922071218689U, (uint64_t)1953032256401326U, (uint64_t)989631424403521U,
-    (uint64_t)328825014934242U, (uint64_t)9407151397696U, (uint64_t)63551373671268U,
-    (uint64_t)1624728632895792U, (uint64_t)1608324920739262U, (uint64_t)1178239350351945U,
-    (uint64_t)1198077399579702U, (uint64_t)277620088676229U, (uint64_t)1775359437312528U,
-    (uint64_t)1653558177737477U, (uint64_t)1652066043408850U, (uint64_t)1063359889686622U,
-    (uint64_t)1975063804860653U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1738742601995546ULL, 1146398526822698ULL, 2070867633025821ULL,
+    562264141797630ULL, 587772402128613ULL, 1801439850948184ULL, 1351079888211148ULL,
+    450359962737049ULL, 900719925474099ULL, 1801439850948198ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    1841354044333475ULL, 16398895984059ULL, 755974180946558ULL, 900171276175154ULL,
+    1821297809914039ULL, 1661154287933054ULL, 284530020860578ULL, 1390261174866914ULL,
+    1524110943907984ULL, 1045603498418422ULL, 928651508580478ULL, 1383326941296346ULL,
+    961937908925785ULL, 80455759693706ULL, 904734540352947ULL, 1507481815385608ULL,
+    2223447444246085ULL, 1083941587175919ULL, 2059929906842505ULL, 1581435440146976ULL,
+    782730187692425ULL, 9928394897574ULL, 1539449519985236ULL, 1923587931078510ULL,
+    552919286076056ULL, 376925408065760ULL, 447320488831784ULL, 1362918338468019ULL,
+    1470031896696846ULL, 2189796996539902ULL, 1337552949959847ULL, 1762287177775726ULL,
+    237994495816815ULL, 1277840395970544ULL, 543972849007241ULL, 1224692671618814ULL,
+    162359533289271ULL, 282240927125249ULL, 586909166382289ULL, 17726488197838ULL,
+    377014554985659ULL, 1433835303052512ULL, 702061469493692ULL, 1142253108318154ULL,
+    318297794307551ULL, 954362646308543ULL, 517363881452320ULL, 1868013482130416ULL,
+    262562472373260ULL, 902232853249919ULL, 2107343057055746ULL, 462368348619024ULL,
+    1893758677092974ULL, 2177729767846389ULL, 2168532543559143ULL, 443867094639821ULL,
+    730169342581022ULL, 1564589016879755ULL, 51218195700649ULL, 76684578423745ULL,
+    560266272480743ULL, 922517457707697ULL, 2066645939860874ULL, 1318277348414638ULL,
+    1576726809084003ULL, 1817337608563665ULL, 1874240939237666ULL, 754733726333910ULL,
+    97085310406474ULL, 751148364309235ULL, 1622159695715187ULL, 1444098819684916ULL,
+    130920805558089ULL, 1260449179085308ULL, 1860021740768461ULL, 110052860348509ULL,
+    193830891643810ULL, 164148413933881ULL, 180017794795332ULL, 1523506525254651ULL,
+    465981629225956ULL, 559733514964572ULL, 1279624874416974ULL, 2026642326892306ULL,
+    1425156829982409ULL, 2160936383793147ULL, 1061870624975247ULL, 2023497043036941ULL,
+    117942212883190ULL, 490339622800774ULL, 1729931303146295ULL, 422305932971074ULL,
+    529103152793096ULL, 1211973233775992ULL, 721364955929681ULL, 1497674430438813ULL,
+    342545521275073ULL, 2102107575279372ULL, 2108462244669966ULL, 1382582406064082ULL,
+    2206396818383323ULL, 2109093268641147ULL, 10809845110983ULL, 1605176920880099ULL,
+    744640650753946ULL, 1712758897518129ULL, 373410811281809ULL, 648838265800209ULL,
+    813058095530999ULL, 513987632620169ULL, 465516160703329ULL, 2136322186126330ULL,
+    1979645899422932ULL, 1197131006470786ULL, 1467836664863979ULL, 1340751381374628ULL,
+    1810066212667962ULL, 1009933588225499ULL, 1106129188080873ULL, 1388980405213901ULL,
+    533719246598044ULL, 1169435803073277ULL, 198920999285821ULL, 487492330629854ULL,
+    1807093008537778ULL, 1540899012923865ULL, 2075080271659867ULL, 1527990806921523ULL,
+    1323728742908002ULL, 1568595959608205ULL, 1388032187497212ULL, 2026968840050568ULL,
+    1396591153295755ULL, 820416950170901ULL, 520060313205582ULL, 2016404325094901ULL,
+    1584709677868520ULL, 272161374469956ULL, 1567188603996816ULL, 1986160530078221ULL,
+    553930264324589ULL, 1058426729027503ULL, 8762762886675ULL, 2216098143382988ULL,
+    1835145266889223ULL, 1712936431558441ULL, 1017009937844974ULL, 585361667812740ULL,
+    2114711541628181ULL, 2238729632971439ULL, 121257546253072ULL, 847154149018345ULL,
+    211972965476684ULL, 287499084460129ULL, 2098247259180197ULL, 839070411583329ULL,
+    339551619574372ULL, 1432951287640743ULL, 526481249498942ULL, 931991661905195ULL,
+    1884279965674487ULL, 200486405604411ULL, 364173020594788ULL, 518034455936955ULL,
+    1085564703965501ULL, 16030410467927ULL, 604865933167613ULL, 1695298441093964ULL,
+    498856548116159ULL, 2193030062787034ULL, 1706339802964179ULL, 1721199073493888ULL,
+    820740951039755ULL, 1216053436896834ULL, 23954895815139ULL, 1662515208920491ULL,
+    1705443427511899ULL, 1957928899570365ULL, 1189636258255725ULL, 1795695471103809ULL,
+    1691191297654118ULL, 282402585374360ULL, 460405330264832ULL, 63765529445733ULL,
+    469763447404473ULL, 733607089694996ULL, 685410420186959ULL, 1096682630419738ULL,
+    1162548510542362ULL, 1020949526456676ULL, 1211660396870573ULL, 613126398222696ULL,
+    1117829165843251ULL, 742432540886650ULL, 1483755088010658ULL, 942392007134474ULL,
+    1447834130944107ULL, 489368274863410ULL, 23192985544898ULL, 648442406146160ULL,
+    785438843373876ULL, 249464684645238ULL, 170494608205618ULL, 335112827260550ULL,
+    1462050123162735ULL, 1084803668439016ULL, 853459233600325ULL, 215777728187495ULL,
+    1965759433526974ULL, 1349482894446537ULL, 694163317612871ULL, 860536766165036ULL,
+    1178788094084321ULL, 1652739626626996ULL, 2115723946388185ULL, 1577204379094664ULL,
+    1083882859023240ULL, 1768759143381635ULL, 1737180992507258ULL, 246054513922239ULL,
+    577253134087234ULL, 356340280578042ULL, 1638917769925142ULL, 223550348130103ULL,
+    470592666638765ULL, 22663573966996ULL, 596552461152400ULL, 364143537069499ULL, 3942119457699ULL,
+    107951982889287ULL, 1843471406713209ULL, 1625773041610986ULL, 1466141092501702ULL,
+    1043024095021271ULL, 310429964047508ULL, 98559121500372ULL, 152746933782868ULL,
+    259407205078261ULL, 828123093322585ULL, 1576847274280091ULL, 1170871375757302ULL,
+    1588856194642775ULL, 984767822341977ULL, 1141497997993760ULL, 809325345150796ULL,
+    1879837728202511ULL, 201340910657893ULL, 1079157558888483ULL, 1052373448588065ULL,
+    1732036202501778ULL, 2105292670328445ULL, 679751387312402ULL, 1679682144926229ULL,
+    1695823455818780ULL, 498852317075849ULL, 1786555067788433ULL, 1670727545779425ULL,
+    117945875433544ULL, 407939139781844ULL, 854632120023778ULL, 1413383148360437ULL,
+    286030901733673ULL, 1207361858071196ULL, 461340408181417ULL, 1096919590360164ULL,
+    1837594897475685ULL, 533755561544165ULL, 1638688042247712ULL, 1431653684793005ULL,
+    1036458538873559ULL, 390822120341779ULL, 1920929837111618ULL, 543426740024168ULL,
+    645751357799929ULL, 2245025632994463ULL, 1550778638076452ULL, 223738153459949ULL,
+    1337209385492033ULL, 1276967236456531ULL, 1463815821063071ULL, 2070620870191473ULL,
+    1199170709413753ULL, 273230877394166ULL, 1873264887608046ULL, 890877152910775ULL,
+    983226445635730ULL, 44873798519521ULL, 697147127512130ULL, 961631038239304ULL,
+    709966160696826ULL, 1706677689540366ULL, 502782733796035ULL, 812545535346033ULL,
+    1693622521296452ULL, 1955813093002510ULL, 1259937612881362ULL, 1873032503803559ULL,
+    1140330566016428ULL, 1675726082440190ULL, 60029928909786ULL, 170335608866763ULL,
+    766444312315022ULL, 2025049511434113ULL, 2200845622430647ULL, 1201269851450408ULL,
+    590071752404907ULL, 1400995030286946ULL, 2152637413853822ULL, 2108495473841983ULL,
+    3855406710349ULL, 1726137673168580ULL, 51004317200100ULL, 1749082328586939ULL,
+    1704088976144558ULL, 1977318954775118ULL, 2062602253162400ULL, 948062503217479ULL,
+    361953965048030ULL, 1528264887238440ULL, 62582552172290ULL, 2241602163389280ULL,
+    156385388121765ULL, 2124100319761492ULL, 388928050571382ULL, 1556123596922727ULL,
+    979310669812384ULL, 113043855206104ULL, 2023223924825469ULL, 643651703263034ULL,
+    2234446903655540ULL, 1577241261424997ULL, 860253174523845ULL, 1691026473082448ULL,
+    1091672764933872ULL, 1957463109756365ULL, 530699502660193ULL, 349587141723569ULL,
+    674661681919563ULL, 1633727303856240ULL, 708909037922144ULL, 2160722508518119ULL,
+    1302188051602540ULL, 976114603845777ULL, 120004758721939ULL, 1681630708873780ULL,
+    622274095069244ULL, 1822346309016698ULL, 1100921177951904ULL, 2216952659181677ULL,
+    1844020550362490ULL, 1976451368365774ULL, 1321101422068822ULL, 1189859436282668ULL,
+    2008801879735257ULL, 2219413454333565ULL, 424288774231098ULL, 359793146977912ULL,
+    270293357948703ULL, 587226003677000ULL, 1482071926139945ULL, 1419630774650359ULL,
+    1104739070570175ULL, 1662129023224130ULL, 1609203612533411ULL, 1250932720691980ULL,
+    95215711818495ULL, 498746909028150ULL, 158151296991874ULL, 1201379988527734ULL,
+    561599945143989ULL, 2211577425617888ULL, 2166577612206324ULL, 1057590354233512ULL,
+    1968123280416769ULL, 1316586165401313ULL, 762728164447634ULL, 2045395244316047ULL,
+    1531796898725716ULL, 315385971670425ULL, 1109421039396756ULL, 2183635256408562ULL,
+    1896751252659461ULL, 840236037179080ULL, 796245792277211ULL, 508345890111193ULL,
+    1275386465287222ULL, 513560822858784ULL, 1784735733120313ULL, 1346467478899695ULL,
+    601125231208417ULL, 701076661112726ULL, 1841998436455089ULL, 1156768600940434ULL,
+    1967853462343221ULL, 2178318463061452ULL, 481885520752741ULL, 675262828640945ULL,
+    1033539418596582ULL, 1743329872635846ULL, 159322641251283ULL, 1573076470127113ULL,
+    954827619308195ULL, 778834750662635ULL, 619912782122617ULL, 515681498488209ULL,
+    1675866144246843ULL, 811716020969981ULL, 1125515272217398ULL, 1398917918287342ULL,
+    1301680949183175ULL, 726474739583734ULL, 587246193475200ULL, 1096581582611864ULL,
+    1469911826213486ULL, 1990099711206364ULL, 1256496099816508ULL, 2019924615195672ULL,
+    1251232456707555ULL, 2042971196009755ULL, 214061878479265ULL, 115385726395472ULL,
+    1677875239524132ULL, 756888883383540ULL, 1153862117756233ULL, 503391530851096ULL,
+    946070017477513ULL, 1878319040542579ULL, 1101349418586920ULL, 793245696431613ULL,
+    397920495357645ULL, 2174023872951112ULL, 1517867915189593ULL, 1829855041462995ULL,
+    1046709983503619ULL, 424081940711857ULL, 2112438073094647ULL, 1504338467349861ULL,
+    2244574127374532ULL, 2136937537441911ULL, 1741150838990304ULL, 25894628400571ULL,
+    512213526781178ULL, 1168384260796379ULL, 1424607682379833ULL, 938677789731564ULL,
+    872882241891896ULL, 1713199397007700ULL, 1410496326218359ULL, 854379752407031ULL,
+    465141611727634ULL, 315176937037857ULL, 1020115054571233ULL, 1856290111077229ULL,
+    2028366269898204ULL, 1432980880307543ULL, 469932710425448ULL, 581165267592247ULL,
+    496399148156603ULL, 2063435226705903ULL, 2116841086237705ULL, 498272567217048ULL,
+    1829438076967906ULL, 1573925801278491ULL, 460763576329867ULL, 1705264723728225ULL,
+    999514866082412ULL, 29635061779362ULL, 1884233592281020ULL, 1449755591461338ULL,
+    42579292783222ULL, 1869504355369200ULL, 495506004805251ULL, 264073104888427ULL,
+    2088880861028612ULL, 104646456386576ULL, 1258445191399967ULL, 1348736801545799ULL,
+    2068276361286613ULL, 884897216646374ULL, 922387476801376ULL, 1043886580402805ULL,
+    1240883498470831ULL, 1601554651937110ULL, 804382935289482ULL, 512379564477239ULL,
+    1466384519077032ULL, 1280698500238386ULL, 211303836685749ULL, 2081725624793803ULL,
+    545247644516879ULL, 215313359330384ULL, 286479751145614ULL, 2213650281751636ULL,
+    2164927945999874ULL, 2072162991540882ULL, 1443769115444779ULL, 1581473274363095ULL,
+    434633875922699ULL, 340456055781599ULL, 373043091080189ULL, 839476566531776ULL,
+    1856706858509978ULL, 931616224909153ULL, 1888181317414065ULL, 213654322650262ULL,
+    1161078103416244ULL, 1822042328851513ULL, 915817709028812ULL, 1828297056698188ULL,
+    1212017130909403ULL, 60258343247333ULL, 342085800008230ULL, 930240559508270ULL,
+    1549884999174952ULL, 809895264249462ULL, 184726257947682ULL, 1157065433504828ULL,
+    1209999630381477ULL, 999920399374391ULL, 1714770150788163ULL, 2026130985413228ULL,
+    506776632883140ULL, 1349042668246528ULL, 1937232292976967ULL, 942302637530730ULL,
+    160211904766226ULL, 1042724500438571ULL, 212454865139142ULL, 244104425172642ULL,
+    1376990622387496ULL, 76126752421227ULL, 1027540886376422ULL, 1912210655133026ULL,
+    13410411589575ULL, 1475856708587773ULL, 615563352691682ULL, 1446629324872644ULL,
+    1683670301784014ULL, 1049873327197127ULL, 1826401704084838ULL, 2032577048760775ULL,
+    1922203607878853ULL, 836708788764806ULL, 2193084654695012ULL, 1342923183256659ULL,
+    849356986294271ULL, 1228863973965618ULL, 94886161081867ULL, 1423288430204892ULL,
+    2016167528707016ULL, 1633187660972877ULL, 1550621242301752ULL, 340630244512994ULL,
+    2103577710806901ULL, 221625016538931ULL, 421544147350960ULL, 580428704555156ULL,
+    1479831381265617ULL, 518057926544698ULL, 955027348790630ULL, 1326749172561598ULL,
+    1118304625755967ULL, 1994005916095176ULL, 1799757332780663ULL, 751343129396941ULL,
+    1468672898746144ULL, 1451689964451386ULL, 755070293921171ULL, 904857405877052ULL,
+    1276087530766984ULL, 403986562858511ULL, 1530661255035337ULL, 1644972908910502ULL,
+    1370170080438957ULL, 139839536695744ULL, 909930462436512ULL, 1899999215356933ULL,
+    635992381064566ULL, 788740975837654ULL, 224241231493695ULL, 1267090030199302ULL,
+    998908061660139ULL, 1784537499699278ULL, 859195370018706ULL, 1953966091439379ULL,
+    2189271820076010ULL, 2039067059943978ULL, 1526694380855202ULL, 2040321513194941ULL,
+    329922071218689ULL, 1953032256401326ULL, 989631424403521ULL, 328825014934242ULL,
+    9407151397696ULL, 63551373671268ULL, 1624728632895792ULL, 1608324920739262ULL,
+    1178239350351945ULL, 1198077399579702ULL, 277620088676229ULL, 1775359437312528ULL,
+    1653558177737477ULL, 1652066043408850ULL, 1063359889686622ULL, 1975063804860653ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_Frodo_KEM.h b/include/internal/Hacl_Frodo_KEM.h
index 5d8f2a85..e4b1a3f9 100644
--- a/include/internal/Hacl_Frodo_KEM.h
+++ b/include/internal/Hacl_Frodo_KEM.h
@@ -64,13 +64,13 @@ Hacl_Keccak_shake128_4x(
 static inline void
 Hacl_Impl_Matrix_mod_pow2(uint32_t n1, uint32_t n2, uint32_t logq, uint16_t *a)
 {
-  if (logq < (uint32_t)16U)
+  if (logq < 16U)
   {
-    for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+    for (uint32_t i0 = 0U; i0 < n1; i0++)
     {
-      for (uint32_t i = (uint32_t)0U; i < n2; i++)
+      for (uint32_t i = 0U; i < n2; i++)
       {
-        a[i0 * n2 + i] = a[i0 * n2 + i] & (((uint16_t)1U << logq) - (uint16_t)1U);
+        a[i0 * n2 + i] = (uint32_t)a[i0 * n2 + i] & ((1U << logq) - 1U);
       }
     }
     return;
@@ -80,11 +80,11 @@ Hacl_Impl_Matrix_mod_pow2(uint32_t n1, uint32_t n2, uint32_t logq, uint16_t *a)
 static inline void
 Hacl_Impl_Matrix_matrix_add(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i = (uint32_t)0U; i < n2; i++)
+    for (uint32_t i = 0U; i < n2; i++)
     {
-      a[i0 * n2 + i] = a[i0 * n2 + i] + b[i0 * n2 + i];
+      a[i0 * n2 + i] = (uint32_t)a[i0 * n2 + i] + (uint32_t)b[i0 * n2 + i];
     }
   }
 }
@@ -92,11 +92,11 @@ Hacl_Impl_Matrix_matrix_add(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 static inline void
 Hacl_Impl_Matrix_matrix_sub(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i = (uint32_t)0U; i < n2; i++)
+    for (uint32_t i = 0U; i < n2; i++)
     {
-      b[i0 * n2 + i] = a[i0 * n2 + i] - b[i0 * n2 + i];
+      b[i0 * n2 + i] = (uint32_t)a[i0 * n2 + i] - (uint32_t)b[i0 * n2 + i];
     }
   }
 }
@@ -111,17 +111,17 @@ Hacl_Impl_Matrix_matrix_mul(
   uint16_t *c
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n3; i1++)
+    for (uint32_t i1 = 0U; i1 < n3; i1++)
     {
-      uint16_t res = (uint16_t)0U;
-      for (uint32_t i = (uint32_t)0U; i < n2; i++)
+      uint16_t res = 0U;
+      for (uint32_t i = 0U; i < n2; i++)
       {
         uint16_t aij = a[i0 * n2 + i];
         uint16_t bjk = b[i * n3 + i1];
         uint16_t res0 = res;
-        res = res0 + aij * bjk;
+        res = (uint32_t)res0 + (uint32_t)aij * (uint32_t)bjk;
       }
       c[i0 * n3 + i1] = res;
     }
@@ -138,17 +138,17 @@ Hacl_Impl_Matrix_matrix_mul_s(
   uint16_t *c
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n3; i1++)
+    for (uint32_t i1 = 0U; i1 < n3; i1++)
     {
-      uint16_t res = (uint16_t)0U;
-      for (uint32_t i = (uint32_t)0U; i < n2; i++)
+      uint16_t res = 0U;
+      for (uint32_t i = 0U; i < n2; i++)
       {
         uint16_t aij = a[i0 * n2 + i];
         uint16_t bjk = b[i1 * n2 + i];
         uint16_t res0 = res;
-        res = res0 + aij * bjk;
+        res = (uint32_t)res0 + (uint32_t)aij * (uint32_t)bjk;
       }
       c[i0 * n3 + i1] = res;
     }
@@ -158,11 +158,11 @@ Hacl_Impl_Matrix_matrix_mul_s(
 static inline uint16_t
 Hacl_Impl_Matrix_matrix_eq(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 {
-  uint16_t res = (uint16_t)0xFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < n1 * n2; i++)
+  uint16_t res = 0xFFFFU;
+  for (uint32_t i = 0U; i < n1 * n2; i++)
   {
     uint16_t uu____0 = FStar_UInt16_eq_mask(a[i], b[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint16_t r = res;
   return r;
@@ -171,19 +171,19 @@ Hacl_Impl_Matrix_matrix_eq(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 static inline void
 Hacl_Impl_Matrix_matrix_to_lbytes(uint32_t n1, uint32_t n2, uint16_t *m, uint8_t *res)
 {
-  for (uint32_t i = (uint32_t)0U; i < n1 * n2; i++)
+  for (uint32_t i = 0U; i < n1 * n2; i++)
   {
-    store16_le(res + (uint32_t)2U * i, m[i]);
+    store16_le(res + 2U * i, m[i]);
   }
 }
 
 static inline void
 Hacl_Impl_Matrix_matrix_from_lbytes(uint32_t n1, uint32_t n2, uint8_t *b, uint16_t *res)
 {
-  for (uint32_t i = (uint32_t)0U; i < n1 * n2; i++)
+  for (uint32_t i = 0U; i < n1 * n2; i++)
   {
     uint16_t *os = res;
-    uint16_t u = load16_le(b + (uint32_t)2U * i);
+    uint16_t u = load16_le(b + 2U * i);
     uint16_t x = u;
     os[i] = x;
   }
@@ -192,53 +192,53 @@ Hacl_Impl_Matrix_matrix_from_lbytes(uint32_t n1, uint32_t n2, uint8_t *b, uint16
 static inline void
 Hacl_Impl_Frodo_Gen_frodo_gen_matrix_shake_4x(uint32_t n, uint8_t *seed, uint16_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint8_t), (uint32_t)8U * n);
-  uint8_t r[(uint32_t)8U * n];
-  memset(r, 0U, (uint32_t)8U * n * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), 8U * n);
+  uint8_t r[8U * n];
+  memset(r, 0U, 8U * n * sizeof (uint8_t));
   uint8_t tmp_seed[72U] = { 0U };
-  memcpy(tmp_seed + (uint32_t)2U, seed, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(tmp_seed + (uint32_t)20U, seed, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(tmp_seed + (uint32_t)38U, seed, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(tmp_seed + (uint32_t)56U, seed, (uint32_t)16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 2U, seed, 16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 20U, seed, 16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 38U, seed, 16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 56U, seed, 16U * sizeof (uint8_t));
   memset(res, 0U, n * n * sizeof (uint16_t));
-  for (uint32_t i = (uint32_t)0U; i < n / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < n / 4U; i++)
   {
-    uint8_t *r0 = r + (uint32_t)0U * n;
-    uint8_t *r1 = r + (uint32_t)2U * n;
-    uint8_t *r2 = r + (uint32_t)4U * n;
-    uint8_t *r3 = r + (uint32_t)6U * n;
+    uint8_t *r0 = r + 0U * n;
+    uint8_t *r1 = r + 2U * n;
+    uint8_t *r2 = r + 4U * n;
+    uint8_t *r3 = r + 6U * n;
     uint8_t *tmp_seed0 = tmp_seed;
-    uint8_t *tmp_seed1 = tmp_seed + (uint32_t)18U;
-    uint8_t *tmp_seed2 = tmp_seed + (uint32_t)36U;
-    uint8_t *tmp_seed3 = tmp_seed + (uint32_t)54U;
-    store16_le(tmp_seed0, (uint16_t)((uint32_t)4U * i + (uint32_t)0U));
-    store16_le(tmp_seed1, (uint16_t)((uint32_t)4U * i + (uint32_t)1U));
-    store16_le(tmp_seed2, (uint16_t)((uint32_t)4U * i + (uint32_t)2U));
-    store16_le(tmp_seed3, (uint16_t)((uint32_t)4U * i + (uint32_t)3U));
-    Hacl_Keccak_shake128_4x((uint32_t)18U,
+    uint8_t *tmp_seed1 = tmp_seed + 18U;
+    uint8_t *tmp_seed2 = tmp_seed + 36U;
+    uint8_t *tmp_seed3 = tmp_seed + 54U;
+    store16_le(tmp_seed0, (uint16_t)(4U * i + 0U));
+    store16_le(tmp_seed1, (uint16_t)(4U * i + 1U));
+    store16_le(tmp_seed2, (uint16_t)(4U * i + 2U));
+    store16_le(tmp_seed3, (uint16_t)(4U * i + 3U));
+    Hacl_Keccak_shake128_4x(18U,
       tmp_seed0,
       tmp_seed1,
       tmp_seed2,
       tmp_seed3,
-      (uint32_t)2U * n,
+      2U * n,
       r0,
       r1,
       r2,
       r3);
-    for (uint32_t i0 = (uint32_t)0U; i0 < n; i0++)
+    for (uint32_t i0 = 0U; i0 < n; i0++)
     {
-      uint8_t *resij0 = r0 + i0 * (uint32_t)2U;
-      uint8_t *resij1 = r1 + i0 * (uint32_t)2U;
-      uint8_t *resij2 = r2 + i0 * (uint32_t)2U;
-      uint8_t *resij3 = r3 + i0 * (uint32_t)2U;
+      uint8_t *resij0 = r0 + i0 * 2U;
+      uint8_t *resij1 = r1 + i0 * 2U;
+      uint8_t *resij2 = r2 + i0 * 2U;
+      uint8_t *resij3 = r3 + i0 * 2U;
       uint16_t u = load16_le(resij0);
-      res[((uint32_t)4U * i + (uint32_t)0U) * n + i0] = u;
+      res[(4U * i + 0U) * n + i0] = u;
       uint16_t u0 = load16_le(resij1);
-      res[((uint32_t)4U * i + (uint32_t)1U) * n + i0] = u0;
+      res[(4U * i + 1U) * n + i0] = u0;
       uint16_t u1 = load16_le(resij2);
-      res[((uint32_t)4U * i + (uint32_t)2U) * n + i0] = u1;
+      res[(4U * i + 2U) * n + i0] = u1;
       uint16_t u2 = load16_le(resij3);
-      res[((uint32_t)4U * i + (uint32_t)3U) * n + i0] = u2;
+      res[(4U * i + 3U) * n + i0] = u2;
     }
   }
 }
@@ -270,27 +270,19 @@ static const
 uint16_t
 Hacl_Impl_Frodo_Params_cdf_table640[13U] =
   {
-    (uint16_t)4643U, (uint16_t)13363U, (uint16_t)20579U, (uint16_t)25843U, (uint16_t)29227U,
-    (uint16_t)31145U, (uint16_t)32103U, (uint16_t)32525U, (uint16_t)32689U, (uint16_t)32745U,
-    (uint16_t)32762U, (uint16_t)32766U, (uint16_t)32767U
+    4643U, 13363U, 20579U, 25843U, 29227U, 31145U, 32103U, 32525U, 32689U, 32745U, 32762U, 32766U,
+    32767U
   };
 
 static const
 uint16_t
 Hacl_Impl_Frodo_Params_cdf_table976[11U] =
-  {
-    (uint16_t)5638U, (uint16_t)15915U, (uint16_t)23689U, (uint16_t)28571U, (uint16_t)31116U,
-    (uint16_t)32217U, (uint16_t)32613U, (uint16_t)32731U, (uint16_t)32760U, (uint16_t)32766U,
-    (uint16_t)32767U
-  };
+  { 5638U, 15915U, 23689U, 28571U, 31116U, 32217U, 32613U, 32731U, 32760U, 32766U, 32767U };
 
 static const
 uint16_t
 Hacl_Impl_Frodo_Params_cdf_table1344[7U] =
-  {
-    (uint16_t)9142U, (uint16_t)23462U, (uint16_t)30338U, (uint16_t)32361U, (uint16_t)32725U,
-    (uint16_t)32765U, (uint16_t)32767U
-  };
+  { 9142U, 23462U, 30338U, 32361U, 32725U, 32765U, 32767U };
 
 static inline void
 Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(
@@ -301,26 +293,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)12U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 12U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table640[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -334,26 +326,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)12U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 12U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table640[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -367,26 +359,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)10U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 10U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table976[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -400,26 +392,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)6U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 6U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table1344[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -435,39 +427,34 @@ Hacl_Impl_Frodo_Pack_frodo_pack(
   uint8_t *res
 )
 {
-  uint32_t n = n1 * n2 / (uint32_t)8U;
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  uint32_t n = n1 * n2 / 8U;
+  for (uint32_t i = 0U; i < n; i++)
   {
-    uint16_t *a1 = a + (uint32_t)8U * i;
+    uint16_t *a1 = a + 8U * i;
     uint8_t *r = res + d * i;
-    uint16_t maskd = (uint16_t)((uint32_t)1U << d) - (uint16_t)1U;
+    uint16_t maskd = (uint32_t)(uint16_t)(1U << d) - 1U;
     uint8_t v16[16U] = { 0U };
-    uint16_t a0 = a1[0U] & maskd;
-    uint16_t a11 = a1[1U] & maskd;
-    uint16_t a2 = a1[2U] & maskd;
-    uint16_t a3 = a1[3U] & maskd;
-    uint16_t a4 = a1[4U] & maskd;
-    uint16_t a5 = a1[5U] & maskd;
-    uint16_t a6 = a1[6U] & maskd;
-    uint16_t a7 = a1[7U] & maskd;
+    uint16_t a0 = (uint32_t)a1[0U] & (uint32_t)maskd;
+    uint16_t a11 = (uint32_t)a1[1U] & (uint32_t)maskd;
+    uint16_t a2 = (uint32_t)a1[2U] & (uint32_t)maskd;
+    uint16_t a3 = (uint32_t)a1[3U] & (uint32_t)maskd;
+    uint16_t a4 = (uint32_t)a1[4U] & (uint32_t)maskd;
+    uint16_t a5 = (uint32_t)a1[5U] & (uint32_t)maskd;
+    uint16_t a6 = (uint32_t)a1[6U] & (uint32_t)maskd;
+    uint16_t a7 = (uint32_t)a1[7U] & (uint32_t)maskd;
     FStar_UInt128_uint128
     templong =
       FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a0),
-                      (uint32_t)7U * d),
-                    FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a11),
-                      (uint32_t)6U * d)),
-                  FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a2),
-                    (uint32_t)5U * d)),
-                FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a3),
-                  (uint32_t)4U * d)),
-              FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a4),
-                (uint32_t)3U * d)),
-            FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a5),
-              (uint32_t)2U * d)),
-          FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a6), (uint32_t)1U * d)),
-        FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a7), (uint32_t)0U * d));
+                      7U * d),
+                    FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a11), 6U * d)),
+                  FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a2), 5U * d)),
+                FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a3), 4U * d)),
+              FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a4), 3U * d)),
+            FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a5), 2U * d)),
+          FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a6), 1U * d)),
+        FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a7), 0U * d));
     store128_be(v16, templong);
-    uint8_t *src = v16 + (uint32_t)16U - d;
+    uint8_t *src = v16 + 16U - d;
     memcpy(r, src, d * sizeof (uint8_t));
   }
 }
@@ -481,48 +468,48 @@ Hacl_Impl_Frodo_Pack_frodo_unpack(
   uint16_t *res
 )
 {
-  uint32_t n = n1 * n2 / (uint32_t)8U;
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  uint32_t n = n1 * n2 / 8U;
+  for (uint32_t i = 0U; i < n; i++)
   {
     uint8_t *b1 = b + d * i;
-    uint16_t *r = res + (uint32_t)8U * i;
-    uint16_t maskd = (uint16_t)((uint32_t)1U << d) - (uint16_t)1U;
+    uint16_t *r = res + 8U * i;
+    uint16_t maskd = (uint32_t)(uint16_t)(1U << d) - 1U;
     uint8_t src[16U] = { 0U };
-    memcpy(src + (uint32_t)16U - d, b1, d * sizeof (uint8_t));
+    memcpy(src + 16U - d, b1, d * sizeof (uint8_t));
     FStar_UInt128_uint128 u = load128_be(src);
     FStar_UInt128_uint128 templong = u;
     r[0U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)7U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          7U * d))
+      & (uint32_t)maskd;
     r[1U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)6U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          6U * d))
+      & (uint32_t)maskd;
     r[2U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)5U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          5U * d))
+      & (uint32_t)maskd;
     r[3U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)4U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          4U * d))
+      & (uint32_t)maskd;
     r[4U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)3U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          3U * d))
+      & (uint32_t)maskd;
     r[5U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)2U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          2U * d))
+      & (uint32_t)maskd;
     r[6U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)1U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          1U * d))
+      & (uint32_t)maskd;
     r[7U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)0U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          0U * d))
+      & (uint32_t)maskd;
   }
 }
 
@@ -535,7 +522,7 @@ Hacl_Impl_Frodo_Encode_frodo_key_encode(
   uint16_t *res
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n; i0++)
+  for (uint32_t i0 = 0U; i0 < n; i0++)
   {
     uint8_t v8[8U] = { 0U };
     uint8_t *chunk = a + i0 * b;
@@ -544,11 +531,11 @@ Hacl_Impl_Frodo_Encode_frodo_key_encode(
     uint64_t x = u;
     uint64_t x0 = x;
     KRML_MAYBE_FOR8(i,
-      (uint32_t)0U,
-      (uint32_t)8U,
-      (uint32_t)1U,
-      uint64_t rk = x0 >> b * i & (((uint64_t)1U << b) - (uint64_t)1U);
-      res[i0 * n + i] = (uint16_t)rk << (logq - b););
+      0U,
+      8U,
+      1U,
+      uint64_t rk = x0 >> b * i & ((1ULL << b) - 1ULL);
+      res[i0 * n + i] = (uint32_t)(uint16_t)rk << (logq - b););
   }
 }
 
@@ -561,16 +548,16 @@ Hacl_Impl_Frodo_Encode_frodo_key_decode(
   uint8_t *res
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n; i0++)
+  for (uint32_t i0 = 0U; i0 < n; i0++)
   {
-    uint64_t templong = (uint64_t)0U;
+    uint64_t templong = 0ULL;
     KRML_MAYBE_FOR8(i,
-      (uint32_t)0U,
-      (uint32_t)8U,
-      (uint32_t)1U,
+      0U,
+      8U,
+      1U,
       uint16_t aik = a[i0 * n + i];
-      uint16_t res1 = (aik + ((uint16_t)1U << (logq - b - (uint32_t)1U))) >> (logq - b);
-      templong = templong | (uint64_t)(res1 & (((uint16_t)1U << b) - (uint16_t)1U)) << b * i;);
+      uint16_t res1 = (((uint32_t)aik + (1U << (logq - b - 1U))) & 0xFFFFU) >> (logq - b);
+      templong = templong | (uint64_t)((uint32_t)res1 & ((1U << b) - 1U)) << b * i;);
     uint64_t templong0 = templong;
     uint8_t v8[8U] = { 0U };
     store64_le(v8, templong0);
diff --git a/include/internal/Hacl_Hash_SHA2.h b/include/internal/Hacl_Hash_SHA2.h
index bbffdc50..8c912fb8 100644
--- a/include/internal/Hacl_Hash_SHA2.h
+++ b/include/internal/Hacl_Hash_SHA2.h
@@ -42,89 +42,74 @@ static const
 uint32_t
 Hacl_Impl_SHA2_Generic_h224[8U] =
   {
-    (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, (uint32_t)0xf70e5939U,
-    (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U
+    0xc1059ed8U, 0x367cd507U, 0x3070dd17U, 0xf70e5939U, 0xffc00b31U, 0x68581511U, 0x64f98fa7U,
+    0xbefa4fa4U
   };
 
 static const
 uint32_t
 Hacl_Impl_SHA2_Generic_h256[8U] =
   {
-    (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU,
-    (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U
+    0x6a09e667U, 0xbb67ae85U, 0x3c6ef372U, 0xa54ff53aU, 0x510e527fU, 0x9b05688cU, 0x1f83d9abU,
+    0x5be0cd19U
   };
 
 static const
 uint64_t
 Hacl_Impl_SHA2_Generic_h384[8U] =
   {
-    (uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, (uint64_t)0x9159015a3070dd17U,
-    (uint64_t)0x152fecd8f70e5939U, (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U,
-    (uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U
+    0xcbbb9d5dc1059ed8ULL, 0x629a292a367cd507ULL, 0x9159015a3070dd17ULL, 0x152fecd8f70e5939ULL,
+    0x67332667ffc00b31ULL, 0x8eb44a8768581511ULL, 0xdb0c2e0d64f98fa7ULL, 0x47b5481dbefa4fa4ULL
   };
 
 static const
 uint64_t
 Hacl_Impl_SHA2_Generic_h512[8U] =
   {
-    (uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, (uint64_t)0x3c6ef372fe94f82bU,
-    (uint64_t)0xa54ff53a5f1d36f1U, (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU,
-    (uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U
+    0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
+    0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL
   };
 
 static const
 uint32_t
 Hacl_Impl_SHA2_Generic_k224_256[64U] =
   {
-    (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U,
-    (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U,
-    (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U,
-    (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U,
-    (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU,
-    (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU,
-    (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U,
-    (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U,
-    (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U,
-    (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U,
-    (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U,
-    (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U,
-    (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U,
-    (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U,
-    (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U,
-    (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U
+    0x428a2f98U, 0x71374491U, 0xb5c0fbcfU, 0xe9b5dba5U, 0x3956c25bU, 0x59f111f1U, 0x923f82a4U,
+    0xab1c5ed5U, 0xd807aa98U, 0x12835b01U, 0x243185beU, 0x550c7dc3U, 0x72be5d74U, 0x80deb1feU,
+    0x9bdc06a7U, 0xc19bf174U, 0xe49b69c1U, 0xefbe4786U, 0x0fc19dc6U, 0x240ca1ccU, 0x2de92c6fU,
+    0x4a7484aaU, 0x5cb0a9dcU, 0x76f988daU, 0x983e5152U, 0xa831c66dU, 0xb00327c8U, 0xbf597fc7U,
+    0xc6e00bf3U, 0xd5a79147U, 0x06ca6351U, 0x14292967U, 0x27b70a85U, 0x2e1b2138U, 0x4d2c6dfcU,
+    0x53380d13U, 0x650a7354U, 0x766a0abbU, 0x81c2c92eU, 0x92722c85U, 0xa2bfe8a1U, 0xa81a664bU,
+    0xc24b8b70U, 0xc76c51a3U, 0xd192e819U, 0xd6990624U, 0xf40e3585U, 0x106aa070U, 0x19a4c116U,
+    0x1e376c08U, 0x2748774cU, 0x34b0bcb5U, 0x391c0cb3U, 0x4ed8aa4aU, 0x5b9cca4fU, 0x682e6ff3U,
+    0x748f82eeU, 0x78a5636fU, 0x84c87814U, 0x8cc70208U, 0x90befffaU, 0xa4506cebU, 0xbef9a3f7U,
+    0xc67178f2U
   };
 
 static const
 uint64_t
 Hacl_Impl_SHA2_Generic_k384_512[80U] =
   {
-    (uint64_t)0x428a2f98d728ae22U, (uint64_t)0x7137449123ef65cdU, (uint64_t)0xb5c0fbcfec4d3b2fU,
-    (uint64_t)0xe9b5dba58189dbbcU, (uint64_t)0x3956c25bf348b538U, (uint64_t)0x59f111f1b605d019U,
-    (uint64_t)0x923f82a4af194f9bU, (uint64_t)0xab1c5ed5da6d8118U, (uint64_t)0xd807aa98a3030242U,
-    (uint64_t)0x12835b0145706fbeU, (uint64_t)0x243185be4ee4b28cU, (uint64_t)0x550c7dc3d5ffb4e2U,
-    (uint64_t)0x72be5d74f27b896fU, (uint64_t)0x80deb1fe3b1696b1U, (uint64_t)0x9bdc06a725c71235U,
-    (uint64_t)0xc19bf174cf692694U, (uint64_t)0xe49b69c19ef14ad2U, (uint64_t)0xefbe4786384f25e3U,
-    (uint64_t)0x0fc19dc68b8cd5b5U, (uint64_t)0x240ca1cc77ac9c65U, (uint64_t)0x2de92c6f592b0275U,
-    (uint64_t)0x4a7484aa6ea6e483U, (uint64_t)0x5cb0a9dcbd41fbd4U, (uint64_t)0x76f988da831153b5U,
-    (uint64_t)0x983e5152ee66dfabU, (uint64_t)0xa831c66d2db43210U, (uint64_t)0xb00327c898fb213fU,
-    (uint64_t)0xbf597fc7beef0ee4U, (uint64_t)0xc6e00bf33da88fc2U, (uint64_t)0xd5a79147930aa725U,
-    (uint64_t)0x06ca6351e003826fU, (uint64_t)0x142929670a0e6e70U, (uint64_t)0x27b70a8546d22ffcU,
-    (uint64_t)0x2e1b21385c26c926U, (uint64_t)0x4d2c6dfc5ac42aedU, (uint64_t)0x53380d139d95b3dfU,
-    (uint64_t)0x650a73548baf63deU, (uint64_t)0x766a0abb3c77b2a8U, (uint64_t)0x81c2c92e47edaee6U,
-    (uint64_t)0x92722c851482353bU, (uint64_t)0xa2bfe8a14cf10364U, (uint64_t)0xa81a664bbc423001U,
-    (uint64_t)0xc24b8b70d0f89791U, (uint64_t)0xc76c51a30654be30U, (uint64_t)0xd192e819d6ef5218U,
-    (uint64_t)0xd69906245565a910U, (uint64_t)0xf40e35855771202aU, (uint64_t)0x106aa07032bbd1b8U,
-    (uint64_t)0x19a4c116b8d2d0c8U, (uint64_t)0x1e376c085141ab53U, (uint64_t)0x2748774cdf8eeb99U,
-    (uint64_t)0x34b0bcb5e19b48a8U, (uint64_t)0x391c0cb3c5c95a63U, (uint64_t)0x4ed8aa4ae3418acbU,
-    (uint64_t)0x5b9cca4f7763e373U, (uint64_t)0x682e6ff3d6b2b8a3U, (uint64_t)0x748f82ee5defb2fcU,
-    (uint64_t)0x78a5636f43172f60U, (uint64_t)0x84c87814a1f0ab72U, (uint64_t)0x8cc702081a6439ecU,
-    (uint64_t)0x90befffa23631e28U, (uint64_t)0xa4506cebde82bde9U, (uint64_t)0xbef9a3f7b2c67915U,
-    (uint64_t)0xc67178f2e372532bU, (uint64_t)0xca273eceea26619cU, (uint64_t)0xd186b8c721c0c207U,
-    (uint64_t)0xeada7dd6cde0eb1eU, (uint64_t)0xf57d4f7fee6ed178U, (uint64_t)0x06f067aa72176fbaU,
-    (uint64_t)0x0a637dc5a2c898a6U, (uint64_t)0x113f9804bef90daeU, (uint64_t)0x1b710b35131c471bU,
-    (uint64_t)0x28db77f523047d84U, (uint64_t)0x32caab7b40c72493U, (uint64_t)0x3c9ebe0a15c9bebcU,
-    (uint64_t)0x431d67c49c100d4cU, (uint64_t)0x4cc5d4becb3e42b6U, (uint64_t)0x597f299cfc657e2aU,
-    (uint64_t)0x5fcb6fab3ad6faecU, (uint64_t)0x6c44198c4a475817U
+    0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
+    0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
+    0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
+    0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
+    0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
+    0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
+    0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
+    0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
+    0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
+    0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
+    0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
+    0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
+    0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
+    0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
+    0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
+    0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
+    0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
+    0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
+    0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
+    0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
   };
 
 void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash);
diff --git a/include/internal/Hacl_Impl_Blake2_Constants.h b/include/internal/Hacl_Impl_Blake2_Constants.h
index 185317ba..a5b6d4ce 100644
--- a/include/internal/Hacl_Impl_Blake2_Constants.h
+++ b/include/internal/Hacl_Impl_Blake2_Constants.h
@@ -39,50 +39,30 @@ static const
 uint32_t
 Hacl_Impl_Blake2_Constants_sigmaTable[160U] =
   {
-    (uint32_t)0U, (uint32_t)1U, (uint32_t)2U, (uint32_t)3U, (uint32_t)4U, (uint32_t)5U,
-    (uint32_t)6U, (uint32_t)7U, (uint32_t)8U, (uint32_t)9U, (uint32_t)10U, (uint32_t)11U,
-    (uint32_t)12U, (uint32_t)13U, (uint32_t)14U, (uint32_t)15U, (uint32_t)14U, (uint32_t)10U,
-    (uint32_t)4U, (uint32_t)8U, (uint32_t)9U, (uint32_t)15U, (uint32_t)13U, (uint32_t)6U,
-    (uint32_t)1U, (uint32_t)12U, (uint32_t)0U, (uint32_t)2U, (uint32_t)11U, (uint32_t)7U,
-    (uint32_t)5U, (uint32_t)3U, (uint32_t)11U, (uint32_t)8U, (uint32_t)12U, (uint32_t)0U,
-    (uint32_t)5U, (uint32_t)2U, (uint32_t)15U, (uint32_t)13U, (uint32_t)10U, (uint32_t)14U,
-    (uint32_t)3U, (uint32_t)6U, (uint32_t)7U, (uint32_t)1U, (uint32_t)9U, (uint32_t)4U,
-    (uint32_t)7U, (uint32_t)9U, (uint32_t)3U, (uint32_t)1U, (uint32_t)13U, (uint32_t)12U,
-    (uint32_t)11U, (uint32_t)14U, (uint32_t)2U, (uint32_t)6U, (uint32_t)5U, (uint32_t)10U,
-    (uint32_t)4U, (uint32_t)0U, (uint32_t)15U, (uint32_t)8U, (uint32_t)9U, (uint32_t)0U,
-    (uint32_t)5U, (uint32_t)7U, (uint32_t)2U, (uint32_t)4U, (uint32_t)10U, (uint32_t)15U,
-    (uint32_t)14U, (uint32_t)1U, (uint32_t)11U, (uint32_t)12U, (uint32_t)6U, (uint32_t)8U,
-    (uint32_t)3U, (uint32_t)13U, (uint32_t)2U, (uint32_t)12U, (uint32_t)6U, (uint32_t)10U,
-    (uint32_t)0U, (uint32_t)11U, (uint32_t)8U, (uint32_t)3U, (uint32_t)4U, (uint32_t)13U,
-    (uint32_t)7U, (uint32_t)5U, (uint32_t)15U, (uint32_t)14U, (uint32_t)1U, (uint32_t)9U,
-    (uint32_t)12U, (uint32_t)5U, (uint32_t)1U, (uint32_t)15U, (uint32_t)14U, (uint32_t)13U,
-    (uint32_t)4U, (uint32_t)10U, (uint32_t)0U, (uint32_t)7U, (uint32_t)6U, (uint32_t)3U,
-    (uint32_t)9U, (uint32_t)2U, (uint32_t)8U, (uint32_t)11U, (uint32_t)13U, (uint32_t)11U,
-    (uint32_t)7U, (uint32_t)14U, (uint32_t)12U, (uint32_t)1U, (uint32_t)3U, (uint32_t)9U,
-    (uint32_t)5U, (uint32_t)0U, (uint32_t)15U, (uint32_t)4U, (uint32_t)8U, (uint32_t)6U,
-    (uint32_t)2U, (uint32_t)10U, (uint32_t)6U, (uint32_t)15U, (uint32_t)14U, (uint32_t)9U,
-    (uint32_t)11U, (uint32_t)3U, (uint32_t)0U, (uint32_t)8U, (uint32_t)12U, (uint32_t)2U,
-    (uint32_t)13U, (uint32_t)7U, (uint32_t)1U, (uint32_t)4U, (uint32_t)10U, (uint32_t)5U,
-    (uint32_t)10U, (uint32_t)2U, (uint32_t)8U, (uint32_t)4U, (uint32_t)7U, (uint32_t)6U,
-    (uint32_t)1U, (uint32_t)5U, (uint32_t)15U, (uint32_t)11U, (uint32_t)9U, (uint32_t)14U,
-    (uint32_t)3U, (uint32_t)12U, (uint32_t)13U
+    0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U, 8U, 9U, 10U, 11U, 12U, 13U, 14U, 15U, 14U, 10U, 4U, 8U, 9U, 15U,
+    13U, 6U, 1U, 12U, 0U, 2U, 11U, 7U, 5U, 3U, 11U, 8U, 12U, 0U, 5U, 2U, 15U, 13U, 10U, 14U, 3U, 6U,
+    7U, 1U, 9U, 4U, 7U, 9U, 3U, 1U, 13U, 12U, 11U, 14U, 2U, 6U, 5U, 10U, 4U, 0U, 15U, 8U, 9U, 0U,
+    5U, 7U, 2U, 4U, 10U, 15U, 14U, 1U, 11U, 12U, 6U, 8U, 3U, 13U, 2U, 12U, 6U, 10U, 0U, 11U, 8U, 3U,
+    4U, 13U, 7U, 5U, 15U, 14U, 1U, 9U, 12U, 5U, 1U, 15U, 14U, 13U, 4U, 10U, 0U, 7U, 6U, 3U, 9U, 2U,
+    8U, 11U, 13U, 11U, 7U, 14U, 12U, 1U, 3U, 9U, 5U, 0U, 15U, 4U, 8U, 6U, 2U, 10U, 6U, 15U, 14U, 9U,
+    11U, 3U, 0U, 8U, 12U, 2U, 13U, 7U, 1U, 4U, 10U, 5U, 10U, 2U, 8U, 4U, 7U, 6U, 1U, 5U, 15U, 11U,
+    9U, 14U, 3U, 12U, 13U
   };
 
 static const
 uint32_t
 Hacl_Impl_Blake2_Constants_ivTable_S[8U] =
   {
-    (uint32_t)0x6A09E667U, (uint32_t)0xBB67AE85U, (uint32_t)0x3C6EF372U, (uint32_t)0xA54FF53AU,
-    (uint32_t)0x510E527FU, (uint32_t)0x9B05688CU, (uint32_t)0x1F83D9ABU, (uint32_t)0x5BE0CD19U
+    0x6A09E667U, 0xBB67AE85U, 0x3C6EF372U, 0xA54FF53AU, 0x510E527FU, 0x9B05688CU, 0x1F83D9ABU,
+    0x5BE0CD19U
   };
 
 static const
 uint64_t
 Hacl_Impl_Blake2_Constants_ivTable_B[8U] =
   {
-    (uint64_t)0x6A09E667F3BCC908U, (uint64_t)0xBB67AE8584CAA73BU, (uint64_t)0x3C6EF372FE94F82BU,
-    (uint64_t)0xA54FF53A5F1D36F1U, (uint64_t)0x510E527FADE682D1U, (uint64_t)0x9B05688C2B3E6C1FU,
-    (uint64_t)0x1F83D9ABFB41BD6BU, (uint64_t)0x5BE0CD19137E2179U
+    0x6A09E667F3BCC908ULL, 0xBB67AE8584CAA73BULL, 0x3C6EF372FE94F82BULL, 0xA54FF53A5F1D36F1ULL,
+    0x510E527FADE682D1ULL, 0x9B05688C2B3E6C1FULL, 0x1F83D9ABFB41BD6BULL, 0x5BE0CD19137E2179ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_Impl_FFDHE_Constants.h b/include/internal/Hacl_Impl_FFDHE_Constants.h
index c746c411..80cbdd52 100644
--- a/include/internal/Hacl_Impl_FFDHE_Constants.h
+++ b/include/internal/Hacl_Impl_FFDHE_Constants.h
@@ -35,528 +35,265 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-static const uint8_t Hacl_Impl_FFDHE_Constants_ffdhe_g2[1U] = { (uint8_t)0x02U };
+static const uint8_t Hacl_Impl_FFDHE_Constants_ffdhe_g2[1U] = { 0x02U };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p2048[256U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x28U,
-    (uint8_t)0x5CU, (uint8_t)0x97U, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x28U, 0x5CU,
+    0x97U, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p3072[384U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0xC6U, (uint8_t)0x2EU, (uint8_t)0x37U, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0xC6U, 0x2EU, 0x37U, 0xFFU,
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p4096[512U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU,
-    (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU,
-    (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U,
-    (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U,
-    (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U,
-    (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U,
-    (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U,
-    (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U,
-    (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU,
-    (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U,
-    (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U,
-    (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U,
-    (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U,
-    (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU,
-    (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U,
-    (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U,
-    (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U,
-    (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU,
-    (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U,
-    (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U,
-    (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU,
-    (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x65U, (uint8_t)0x5FU, (uint8_t)0x6AU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0x9EU, 0x1EU, 0xF1U, 0x6EU,
+    0x6FU, 0x52U, 0xC3U, 0x16U, 0x4DU, 0xF4U, 0xFBU, 0x79U, 0x30U, 0xE9U, 0xE4U, 0xE5U, 0x88U,
+    0x57U, 0xB6U, 0xACU, 0x7DU, 0x5FU, 0x42U, 0xD6U, 0x9FU, 0x6DU, 0x18U, 0x77U, 0x63U, 0xCFU,
+    0x1DU, 0x55U, 0x03U, 0x40U, 0x04U, 0x87U, 0xF5U, 0x5BU, 0xA5U, 0x7EU, 0x31U, 0xCCU, 0x7AU,
+    0x71U, 0x35U, 0xC8U, 0x86U, 0xEFU, 0xB4U, 0x31U, 0x8AU, 0xEDU, 0x6AU, 0x1EU, 0x01U, 0x2DU,
+    0x9EU, 0x68U, 0x32U, 0xA9U, 0x07U, 0x60U, 0x0AU, 0x91U, 0x81U, 0x30U, 0xC4U, 0x6DU, 0xC7U,
+    0x78U, 0xF9U, 0x71U, 0xADU, 0x00U, 0x38U, 0x09U, 0x29U, 0x99U, 0xA3U, 0x33U, 0xCBU, 0x8BU,
+    0x7AU, 0x1AU, 0x1DU, 0xB9U, 0x3DU, 0x71U, 0x40U, 0x00U, 0x3CU, 0x2AU, 0x4EU, 0xCEU, 0xA9U,
+    0xF9U, 0x8DU, 0x0AU, 0xCCU, 0x0AU, 0x82U, 0x91U, 0xCDU, 0xCEU, 0xC9U, 0x7DU, 0xCFU, 0x8EU,
+    0xC9U, 0xB5U, 0x5AU, 0x7FU, 0x88U, 0xA4U, 0x6BU, 0x4DU, 0xB5U, 0xA8U, 0x51U, 0xF4U, 0x41U,
+    0x82U, 0xE1U, 0xC6U, 0x8AU, 0x00U, 0x7EU, 0x5EU, 0x65U, 0x5FU, 0x6AU, 0xFFU, 0xFFU, 0xFFU,
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p6144[768U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU,
-    (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU,
-    (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U,
-    (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U,
-    (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U,
-    (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U,
-    (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U,
-    (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U,
-    (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU,
-    (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U,
-    (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U,
-    (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U,
-    (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U,
-    (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU,
-    (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U,
-    (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U,
-    (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U,
-    (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU,
-    (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U,
-    (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U,
-    (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU,
-    (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x0DU, (uint8_t)0xD9U, (uint8_t)0x02U,
-    (uint8_t)0x0BU, (uint8_t)0xFDU, (uint8_t)0x64U, (uint8_t)0xB6U, (uint8_t)0x45U, (uint8_t)0x03U,
-    (uint8_t)0x6CU, (uint8_t)0x7AU, (uint8_t)0x4EU, (uint8_t)0x67U, (uint8_t)0x7DU, (uint8_t)0x2CU,
-    (uint8_t)0x38U, (uint8_t)0x53U, (uint8_t)0x2AU, (uint8_t)0x3AU, (uint8_t)0x23U, (uint8_t)0xBAU,
-    (uint8_t)0x44U, (uint8_t)0x42U, (uint8_t)0xCAU, (uint8_t)0xF5U, (uint8_t)0x3EU, (uint8_t)0xA6U,
-    (uint8_t)0x3BU, (uint8_t)0xB4U, (uint8_t)0x54U, (uint8_t)0x32U, (uint8_t)0x9BU, (uint8_t)0x76U,
-    (uint8_t)0x24U, (uint8_t)0xC8U, (uint8_t)0x91U, (uint8_t)0x7BU, (uint8_t)0xDDU, (uint8_t)0x64U,
-    (uint8_t)0xB1U, (uint8_t)0xC0U, (uint8_t)0xFDU, (uint8_t)0x4CU, (uint8_t)0xB3U, (uint8_t)0x8EU,
-    (uint8_t)0x8CU, (uint8_t)0x33U, (uint8_t)0x4CU, (uint8_t)0x70U, (uint8_t)0x1CU, (uint8_t)0x3AU,
-    (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x06U, (uint8_t)0x57U, (uint8_t)0xFCU, (uint8_t)0xCFU,
-    (uint8_t)0xECU, (uint8_t)0x71U, (uint8_t)0x9BU, (uint8_t)0x1FU, (uint8_t)0x5CU, (uint8_t)0x3EU,
-    (uint8_t)0x4EU, (uint8_t)0x46U, (uint8_t)0x04U, (uint8_t)0x1FU, (uint8_t)0x38U, (uint8_t)0x81U,
-    (uint8_t)0x47U, (uint8_t)0xFBU, (uint8_t)0x4CU, (uint8_t)0xFDU, (uint8_t)0xB4U, (uint8_t)0x77U,
-    (uint8_t)0xA5U, (uint8_t)0x24U, (uint8_t)0x71U, (uint8_t)0xF7U, (uint8_t)0xA9U, (uint8_t)0xA9U,
-    (uint8_t)0x69U, (uint8_t)0x10U, (uint8_t)0xB8U, (uint8_t)0x55U, (uint8_t)0x32U, (uint8_t)0x2EU,
-    (uint8_t)0xDBU, (uint8_t)0x63U, (uint8_t)0x40U, (uint8_t)0xD8U, (uint8_t)0xA0U, (uint8_t)0x0EU,
-    (uint8_t)0xF0U, (uint8_t)0x92U, (uint8_t)0x35U, (uint8_t)0x05U, (uint8_t)0x11U, (uint8_t)0xE3U,
-    (uint8_t)0x0AU, (uint8_t)0xBEU, (uint8_t)0xC1U, (uint8_t)0xFFU, (uint8_t)0xF9U, (uint8_t)0xE3U,
-    (uint8_t)0xA2U, (uint8_t)0x6EU, (uint8_t)0x7FU, (uint8_t)0xB2U, (uint8_t)0x9FU, (uint8_t)0x8CU,
-    (uint8_t)0x18U, (uint8_t)0x30U, (uint8_t)0x23U, (uint8_t)0xC3U, (uint8_t)0x58U, (uint8_t)0x7EU,
-    (uint8_t)0x38U, (uint8_t)0xDAU, (uint8_t)0x00U, (uint8_t)0x77U, (uint8_t)0xD9U, (uint8_t)0xB4U,
-    (uint8_t)0x76U, (uint8_t)0x3EU, (uint8_t)0x4EU, (uint8_t)0x4BU, (uint8_t)0x94U, (uint8_t)0xB2U,
-    (uint8_t)0xBBU, (uint8_t)0xC1U, (uint8_t)0x94U, (uint8_t)0xC6U, (uint8_t)0x65U, (uint8_t)0x1EU,
-    (uint8_t)0x77U, (uint8_t)0xCAU, (uint8_t)0xF9U, (uint8_t)0x92U, (uint8_t)0xEEU, (uint8_t)0xAAU,
-    (uint8_t)0xC0U, (uint8_t)0x23U, (uint8_t)0x2AU, (uint8_t)0x28U, (uint8_t)0x1BU, (uint8_t)0xF6U,
-    (uint8_t)0xB3U, (uint8_t)0xA7U, (uint8_t)0x39U, (uint8_t)0xC1U, (uint8_t)0x22U, (uint8_t)0x61U,
-    (uint8_t)0x16U, (uint8_t)0x82U, (uint8_t)0x0AU, (uint8_t)0xE8U, (uint8_t)0xDBU, (uint8_t)0x58U,
-    (uint8_t)0x47U, (uint8_t)0xA6U, (uint8_t)0x7CU, (uint8_t)0xBEU, (uint8_t)0xF9U, (uint8_t)0xC9U,
-    (uint8_t)0x09U, (uint8_t)0x1BU, (uint8_t)0x46U, (uint8_t)0x2DU, (uint8_t)0x53U, (uint8_t)0x8CU,
-    (uint8_t)0xD7U, (uint8_t)0x2BU, (uint8_t)0x03U, (uint8_t)0x74U, (uint8_t)0x6AU, (uint8_t)0xE7U,
-    (uint8_t)0x7FU, (uint8_t)0x5EU, (uint8_t)0x62U, (uint8_t)0x29U, (uint8_t)0x2CU, (uint8_t)0x31U,
-    (uint8_t)0x15U, (uint8_t)0x62U, (uint8_t)0xA8U, (uint8_t)0x46U, (uint8_t)0x50U, (uint8_t)0x5DU,
-    (uint8_t)0xC8U, (uint8_t)0x2DU, (uint8_t)0xB8U, (uint8_t)0x54U, (uint8_t)0x33U, (uint8_t)0x8AU,
-    (uint8_t)0xE4U, (uint8_t)0x9FU, (uint8_t)0x52U, (uint8_t)0x35U, (uint8_t)0xC9U, (uint8_t)0x5BU,
-    (uint8_t)0x91U, (uint8_t)0x17U, (uint8_t)0x8CU, (uint8_t)0xCFU, (uint8_t)0x2DU, (uint8_t)0xD5U,
-    (uint8_t)0xCAU, (uint8_t)0xCEU, (uint8_t)0xF4U, (uint8_t)0x03U, (uint8_t)0xECU, (uint8_t)0x9DU,
-    (uint8_t)0x18U, (uint8_t)0x10U, (uint8_t)0xC6U, (uint8_t)0x27U, (uint8_t)0x2BU, (uint8_t)0x04U,
-    (uint8_t)0x5BU, (uint8_t)0x3BU, (uint8_t)0x71U, (uint8_t)0xF9U, (uint8_t)0xDCU, (uint8_t)0x6BU,
-    (uint8_t)0x80U, (uint8_t)0xD6U, (uint8_t)0x3FU, (uint8_t)0xDDU, (uint8_t)0x4AU, (uint8_t)0x8EU,
-    (uint8_t)0x9AU, (uint8_t)0xDBU, (uint8_t)0x1EU, (uint8_t)0x69U, (uint8_t)0x62U, (uint8_t)0xA6U,
-    (uint8_t)0x95U, (uint8_t)0x26U, (uint8_t)0xD4U, (uint8_t)0x31U, (uint8_t)0x61U, (uint8_t)0xC1U,
-    (uint8_t)0xA4U, (uint8_t)0x1DU, (uint8_t)0x57U, (uint8_t)0x0DU, (uint8_t)0x79U, (uint8_t)0x38U,
-    (uint8_t)0xDAU, (uint8_t)0xD4U, (uint8_t)0xA4U, (uint8_t)0x0EU, (uint8_t)0x32U, (uint8_t)0x9CU,
-    (uint8_t)0xD0U, (uint8_t)0xE4U, (uint8_t)0x0EU, (uint8_t)0x65U, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0x9EU, 0x1EU, 0xF1U, 0x6EU,
+    0x6FU, 0x52U, 0xC3U, 0x16U, 0x4DU, 0xF4U, 0xFBU, 0x79U, 0x30U, 0xE9U, 0xE4U, 0xE5U, 0x88U,
+    0x57U, 0xB6U, 0xACU, 0x7DU, 0x5FU, 0x42U, 0xD6U, 0x9FU, 0x6DU, 0x18U, 0x77U, 0x63U, 0xCFU,
+    0x1DU, 0x55U, 0x03U, 0x40U, 0x04U, 0x87U, 0xF5U, 0x5BU, 0xA5U, 0x7EU, 0x31U, 0xCCU, 0x7AU,
+    0x71U, 0x35U, 0xC8U, 0x86U, 0xEFU, 0xB4U, 0x31U, 0x8AU, 0xEDU, 0x6AU, 0x1EU, 0x01U, 0x2DU,
+    0x9EU, 0x68U, 0x32U, 0xA9U, 0x07U, 0x60U, 0x0AU, 0x91U, 0x81U, 0x30U, 0xC4U, 0x6DU, 0xC7U,
+    0x78U, 0xF9U, 0x71U, 0xADU, 0x00U, 0x38U, 0x09U, 0x29U, 0x99U, 0xA3U, 0x33U, 0xCBU, 0x8BU,
+    0x7AU, 0x1AU, 0x1DU, 0xB9U, 0x3DU, 0x71U, 0x40U, 0x00U, 0x3CU, 0x2AU, 0x4EU, 0xCEU, 0xA9U,
+    0xF9U, 0x8DU, 0x0AU, 0xCCU, 0x0AU, 0x82U, 0x91U, 0xCDU, 0xCEU, 0xC9U, 0x7DU, 0xCFU, 0x8EU,
+    0xC9U, 0xB5U, 0x5AU, 0x7FU, 0x88U, 0xA4U, 0x6BU, 0x4DU, 0xB5U, 0xA8U, 0x51U, 0xF4U, 0x41U,
+    0x82U, 0xE1U, 0xC6U, 0x8AU, 0x00U, 0x7EU, 0x5EU, 0x0DU, 0xD9U, 0x02U, 0x0BU, 0xFDU, 0x64U,
+    0xB6U, 0x45U, 0x03U, 0x6CU, 0x7AU, 0x4EU, 0x67U, 0x7DU, 0x2CU, 0x38U, 0x53U, 0x2AU, 0x3AU,
+    0x23U, 0xBAU, 0x44U, 0x42U, 0xCAU, 0xF5U, 0x3EU, 0xA6U, 0x3BU, 0xB4U, 0x54U, 0x32U, 0x9BU,
+    0x76U, 0x24U, 0xC8U, 0x91U, 0x7BU, 0xDDU, 0x64U, 0xB1U, 0xC0U, 0xFDU, 0x4CU, 0xB3U, 0x8EU,
+    0x8CU, 0x33U, 0x4CU, 0x70U, 0x1CU, 0x3AU, 0xCDU, 0xADU, 0x06U, 0x57U, 0xFCU, 0xCFU, 0xECU,
+    0x71U, 0x9BU, 0x1FU, 0x5CU, 0x3EU, 0x4EU, 0x46U, 0x04U, 0x1FU, 0x38U, 0x81U, 0x47U, 0xFBU,
+    0x4CU, 0xFDU, 0xB4U, 0x77U, 0xA5U, 0x24U, 0x71U, 0xF7U, 0xA9U, 0xA9U, 0x69U, 0x10U, 0xB8U,
+    0x55U, 0x32U, 0x2EU, 0xDBU, 0x63U, 0x40U, 0xD8U, 0xA0U, 0x0EU, 0xF0U, 0x92U, 0x35U, 0x05U,
+    0x11U, 0xE3U, 0x0AU, 0xBEU, 0xC1U, 0xFFU, 0xF9U, 0xE3U, 0xA2U, 0x6EU, 0x7FU, 0xB2U, 0x9FU,
+    0x8CU, 0x18U, 0x30U, 0x23U, 0xC3U, 0x58U, 0x7EU, 0x38U, 0xDAU, 0x00U, 0x77U, 0xD9U, 0xB4U,
+    0x76U, 0x3EU, 0x4EU, 0x4BU, 0x94U, 0xB2U, 0xBBU, 0xC1U, 0x94U, 0xC6U, 0x65U, 0x1EU, 0x77U,
+    0xCAU, 0xF9U, 0x92U, 0xEEU, 0xAAU, 0xC0U, 0x23U, 0x2AU, 0x28U, 0x1BU, 0xF6U, 0xB3U, 0xA7U,
+    0x39U, 0xC1U, 0x22U, 0x61U, 0x16U, 0x82U, 0x0AU, 0xE8U, 0xDBU, 0x58U, 0x47U, 0xA6U, 0x7CU,
+    0xBEU, 0xF9U, 0xC9U, 0x09U, 0x1BU, 0x46U, 0x2DU, 0x53U, 0x8CU, 0xD7U, 0x2BU, 0x03U, 0x74U,
+    0x6AU, 0xE7U, 0x7FU, 0x5EU, 0x62U, 0x29U, 0x2CU, 0x31U, 0x15U, 0x62U, 0xA8U, 0x46U, 0x50U,
+    0x5DU, 0xC8U, 0x2DU, 0xB8U, 0x54U, 0x33U, 0x8AU, 0xE4U, 0x9FU, 0x52U, 0x35U, 0xC9U, 0x5BU,
+    0x91U, 0x17U, 0x8CU, 0xCFU, 0x2DU, 0xD5U, 0xCAU, 0xCEU, 0xF4U, 0x03U, 0xECU, 0x9DU, 0x18U,
+    0x10U, 0xC6U, 0x27U, 0x2BU, 0x04U, 0x5BU, 0x3BU, 0x71U, 0xF9U, 0xDCU, 0x6BU, 0x80U, 0xD6U,
+    0x3FU, 0xDDU, 0x4AU, 0x8EU, 0x9AU, 0xDBU, 0x1EU, 0x69U, 0x62U, 0xA6U, 0x95U, 0x26U, 0xD4U,
+    0x31U, 0x61U, 0xC1U, 0xA4U, 0x1DU, 0x57U, 0x0DU, 0x79U, 0x38U, 0xDAU, 0xD4U, 0xA4U, 0x0EU,
+    0x32U, 0x9CU, 0xD0U, 0xE4U, 0x0EU, 0x65U, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU,
+    0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p8192[1024U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU,
-    (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU,
-    (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U,
-    (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U,
-    (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U,
-    (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U,
-    (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U,
-    (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U,
-    (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU,
-    (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U,
-    (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U,
-    (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U,
-    (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U,
-    (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU,
-    (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U,
-    (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U,
-    (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U,
-    (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU,
-    (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U,
-    (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U,
-    (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU,
-    (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x0DU, (uint8_t)0xD9U, (uint8_t)0x02U,
-    (uint8_t)0x0BU, (uint8_t)0xFDU, (uint8_t)0x64U, (uint8_t)0xB6U, (uint8_t)0x45U, (uint8_t)0x03U,
-    (uint8_t)0x6CU, (uint8_t)0x7AU, (uint8_t)0x4EU, (uint8_t)0x67U, (uint8_t)0x7DU, (uint8_t)0x2CU,
-    (uint8_t)0x38U, (uint8_t)0x53U, (uint8_t)0x2AU, (uint8_t)0x3AU, (uint8_t)0x23U, (uint8_t)0xBAU,
-    (uint8_t)0x44U, (uint8_t)0x42U, (uint8_t)0xCAU, (uint8_t)0xF5U, (uint8_t)0x3EU, (uint8_t)0xA6U,
-    (uint8_t)0x3BU, (uint8_t)0xB4U, (uint8_t)0x54U, (uint8_t)0x32U, (uint8_t)0x9BU, (uint8_t)0x76U,
-    (uint8_t)0x24U, (uint8_t)0xC8U, (uint8_t)0x91U, (uint8_t)0x7BU, (uint8_t)0xDDU, (uint8_t)0x64U,
-    (uint8_t)0xB1U, (uint8_t)0xC0U, (uint8_t)0xFDU, (uint8_t)0x4CU, (uint8_t)0xB3U, (uint8_t)0x8EU,
-    (uint8_t)0x8CU, (uint8_t)0x33U, (uint8_t)0x4CU, (uint8_t)0x70U, (uint8_t)0x1CU, (uint8_t)0x3AU,
-    (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x06U, (uint8_t)0x57U, (uint8_t)0xFCU, (uint8_t)0xCFU,
-    (uint8_t)0xECU, (uint8_t)0x71U, (uint8_t)0x9BU, (uint8_t)0x1FU, (uint8_t)0x5CU, (uint8_t)0x3EU,
-    (uint8_t)0x4EU, (uint8_t)0x46U, (uint8_t)0x04U, (uint8_t)0x1FU, (uint8_t)0x38U, (uint8_t)0x81U,
-    (uint8_t)0x47U, (uint8_t)0xFBU, (uint8_t)0x4CU, (uint8_t)0xFDU, (uint8_t)0xB4U, (uint8_t)0x77U,
-    (uint8_t)0xA5U, (uint8_t)0x24U, (uint8_t)0x71U, (uint8_t)0xF7U, (uint8_t)0xA9U, (uint8_t)0xA9U,
-    (uint8_t)0x69U, (uint8_t)0x10U, (uint8_t)0xB8U, (uint8_t)0x55U, (uint8_t)0x32U, (uint8_t)0x2EU,
-    (uint8_t)0xDBU, (uint8_t)0x63U, (uint8_t)0x40U, (uint8_t)0xD8U, (uint8_t)0xA0U, (uint8_t)0x0EU,
-    (uint8_t)0xF0U, (uint8_t)0x92U, (uint8_t)0x35U, (uint8_t)0x05U, (uint8_t)0x11U, (uint8_t)0xE3U,
-    (uint8_t)0x0AU, (uint8_t)0xBEU, (uint8_t)0xC1U, (uint8_t)0xFFU, (uint8_t)0xF9U, (uint8_t)0xE3U,
-    (uint8_t)0xA2U, (uint8_t)0x6EU, (uint8_t)0x7FU, (uint8_t)0xB2U, (uint8_t)0x9FU, (uint8_t)0x8CU,
-    (uint8_t)0x18U, (uint8_t)0x30U, (uint8_t)0x23U, (uint8_t)0xC3U, (uint8_t)0x58U, (uint8_t)0x7EU,
-    (uint8_t)0x38U, (uint8_t)0xDAU, (uint8_t)0x00U, (uint8_t)0x77U, (uint8_t)0xD9U, (uint8_t)0xB4U,
-    (uint8_t)0x76U, (uint8_t)0x3EU, (uint8_t)0x4EU, (uint8_t)0x4BU, (uint8_t)0x94U, (uint8_t)0xB2U,
-    (uint8_t)0xBBU, (uint8_t)0xC1U, (uint8_t)0x94U, (uint8_t)0xC6U, (uint8_t)0x65U, (uint8_t)0x1EU,
-    (uint8_t)0x77U, (uint8_t)0xCAU, (uint8_t)0xF9U, (uint8_t)0x92U, (uint8_t)0xEEU, (uint8_t)0xAAU,
-    (uint8_t)0xC0U, (uint8_t)0x23U, (uint8_t)0x2AU, (uint8_t)0x28U, (uint8_t)0x1BU, (uint8_t)0xF6U,
-    (uint8_t)0xB3U, (uint8_t)0xA7U, (uint8_t)0x39U, (uint8_t)0xC1U, (uint8_t)0x22U, (uint8_t)0x61U,
-    (uint8_t)0x16U, (uint8_t)0x82U, (uint8_t)0x0AU, (uint8_t)0xE8U, (uint8_t)0xDBU, (uint8_t)0x58U,
-    (uint8_t)0x47U, (uint8_t)0xA6U, (uint8_t)0x7CU, (uint8_t)0xBEU, (uint8_t)0xF9U, (uint8_t)0xC9U,
-    (uint8_t)0x09U, (uint8_t)0x1BU, (uint8_t)0x46U, (uint8_t)0x2DU, (uint8_t)0x53U, (uint8_t)0x8CU,
-    (uint8_t)0xD7U, (uint8_t)0x2BU, (uint8_t)0x03U, (uint8_t)0x74U, (uint8_t)0x6AU, (uint8_t)0xE7U,
-    (uint8_t)0x7FU, (uint8_t)0x5EU, (uint8_t)0x62U, (uint8_t)0x29U, (uint8_t)0x2CU, (uint8_t)0x31U,
-    (uint8_t)0x15U, (uint8_t)0x62U, (uint8_t)0xA8U, (uint8_t)0x46U, (uint8_t)0x50U, (uint8_t)0x5DU,
-    (uint8_t)0xC8U, (uint8_t)0x2DU, (uint8_t)0xB8U, (uint8_t)0x54U, (uint8_t)0x33U, (uint8_t)0x8AU,
-    (uint8_t)0xE4U, (uint8_t)0x9FU, (uint8_t)0x52U, (uint8_t)0x35U, (uint8_t)0xC9U, (uint8_t)0x5BU,
-    (uint8_t)0x91U, (uint8_t)0x17U, (uint8_t)0x8CU, (uint8_t)0xCFU, (uint8_t)0x2DU, (uint8_t)0xD5U,
-    (uint8_t)0xCAU, (uint8_t)0xCEU, (uint8_t)0xF4U, (uint8_t)0x03U, (uint8_t)0xECU, (uint8_t)0x9DU,
-    (uint8_t)0x18U, (uint8_t)0x10U, (uint8_t)0xC6U, (uint8_t)0x27U, (uint8_t)0x2BU, (uint8_t)0x04U,
-    (uint8_t)0x5BU, (uint8_t)0x3BU, (uint8_t)0x71U, (uint8_t)0xF9U, (uint8_t)0xDCU, (uint8_t)0x6BU,
-    (uint8_t)0x80U, (uint8_t)0xD6U, (uint8_t)0x3FU, (uint8_t)0xDDU, (uint8_t)0x4AU, (uint8_t)0x8EU,
-    (uint8_t)0x9AU, (uint8_t)0xDBU, (uint8_t)0x1EU, (uint8_t)0x69U, (uint8_t)0x62U, (uint8_t)0xA6U,
-    (uint8_t)0x95U, (uint8_t)0x26U, (uint8_t)0xD4U, (uint8_t)0x31U, (uint8_t)0x61U, (uint8_t)0xC1U,
-    (uint8_t)0xA4U, (uint8_t)0x1DU, (uint8_t)0x57U, (uint8_t)0x0DU, (uint8_t)0x79U, (uint8_t)0x38U,
-    (uint8_t)0xDAU, (uint8_t)0xD4U, (uint8_t)0xA4U, (uint8_t)0x0EU, (uint8_t)0x32U, (uint8_t)0x9CU,
-    (uint8_t)0xCFU, (uint8_t)0xF4U, (uint8_t)0x6AU, (uint8_t)0xAAU, (uint8_t)0x36U, (uint8_t)0xADU,
-    (uint8_t)0x00U, (uint8_t)0x4CU, (uint8_t)0xF6U, (uint8_t)0x00U, (uint8_t)0xC8U, (uint8_t)0x38U,
-    (uint8_t)0x1EU, (uint8_t)0x42U, (uint8_t)0x5AU, (uint8_t)0x31U, (uint8_t)0xD9U, (uint8_t)0x51U,
-    (uint8_t)0xAEU, (uint8_t)0x64U, (uint8_t)0xFDU, (uint8_t)0xB2U, (uint8_t)0x3FU, (uint8_t)0xCEU,
-    (uint8_t)0xC9U, (uint8_t)0x50U, (uint8_t)0x9DU, (uint8_t)0x43U, (uint8_t)0x68U, (uint8_t)0x7FU,
-    (uint8_t)0xEBU, (uint8_t)0x69U, (uint8_t)0xEDU, (uint8_t)0xD1U, (uint8_t)0xCCU, (uint8_t)0x5EU,
-    (uint8_t)0x0BU, (uint8_t)0x8CU, (uint8_t)0xC3U, (uint8_t)0xBDU, (uint8_t)0xF6U, (uint8_t)0x4BU,
-    (uint8_t)0x10U, (uint8_t)0xEFU, (uint8_t)0x86U, (uint8_t)0xB6U, (uint8_t)0x31U, (uint8_t)0x42U,
-    (uint8_t)0xA3U, (uint8_t)0xABU, (uint8_t)0x88U, (uint8_t)0x29U, (uint8_t)0x55U, (uint8_t)0x5BU,
-    (uint8_t)0x2FU, (uint8_t)0x74U, (uint8_t)0x7CU, (uint8_t)0x93U, (uint8_t)0x26U, (uint8_t)0x65U,
-    (uint8_t)0xCBU, (uint8_t)0x2CU, (uint8_t)0x0FU, (uint8_t)0x1CU, (uint8_t)0xC0U, (uint8_t)0x1BU,
-    (uint8_t)0xD7U, (uint8_t)0x02U, (uint8_t)0x29U, (uint8_t)0x38U, (uint8_t)0x88U, (uint8_t)0x39U,
-    (uint8_t)0xD2U, (uint8_t)0xAFU, (uint8_t)0x05U, (uint8_t)0xE4U, (uint8_t)0x54U, (uint8_t)0x50U,
-    (uint8_t)0x4AU, (uint8_t)0xC7U, (uint8_t)0x8BU, (uint8_t)0x75U, (uint8_t)0x82U, (uint8_t)0x82U,
-    (uint8_t)0x28U, (uint8_t)0x46U, (uint8_t)0xC0U, (uint8_t)0xBAU, (uint8_t)0x35U, (uint8_t)0xC3U,
-    (uint8_t)0x5FU, (uint8_t)0x5CU, (uint8_t)0x59U, (uint8_t)0x16U, (uint8_t)0x0CU, (uint8_t)0xC0U,
-    (uint8_t)0x46U, (uint8_t)0xFDU, (uint8_t)0x82U, (uint8_t)0x51U, (uint8_t)0x54U, (uint8_t)0x1FU,
-    (uint8_t)0xC6U, (uint8_t)0x8CU, (uint8_t)0x9CU, (uint8_t)0x86U, (uint8_t)0xB0U, (uint8_t)0x22U,
-    (uint8_t)0xBBU, (uint8_t)0x70U, (uint8_t)0x99U, (uint8_t)0x87U, (uint8_t)0x6AU, (uint8_t)0x46U,
-    (uint8_t)0x0EU, (uint8_t)0x74U, (uint8_t)0x51U, (uint8_t)0xA8U, (uint8_t)0xA9U, (uint8_t)0x31U,
-    (uint8_t)0x09U, (uint8_t)0x70U, (uint8_t)0x3FU, (uint8_t)0xEEU, (uint8_t)0x1CU, (uint8_t)0x21U,
-    (uint8_t)0x7EU, (uint8_t)0x6CU, (uint8_t)0x38U, (uint8_t)0x26U, (uint8_t)0xE5U, (uint8_t)0x2CU,
-    (uint8_t)0x51U, (uint8_t)0xAAU, (uint8_t)0x69U, (uint8_t)0x1EU, (uint8_t)0x0EU, (uint8_t)0x42U,
-    (uint8_t)0x3CU, (uint8_t)0xFCU, (uint8_t)0x99U, (uint8_t)0xE9U, (uint8_t)0xE3U, (uint8_t)0x16U,
-    (uint8_t)0x50U, (uint8_t)0xC1U, (uint8_t)0x21U, (uint8_t)0x7BU, (uint8_t)0x62U, (uint8_t)0x48U,
-    (uint8_t)0x16U, (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x9AU, (uint8_t)0x95U, (uint8_t)0xF9U,
-    (uint8_t)0xD5U, (uint8_t)0xB8U, (uint8_t)0x01U, (uint8_t)0x94U, (uint8_t)0x88U, (uint8_t)0xD9U,
-    (uint8_t)0xC0U, (uint8_t)0xA0U, (uint8_t)0xA1U, (uint8_t)0xFEU, (uint8_t)0x30U, (uint8_t)0x75U,
-    (uint8_t)0xA5U, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0x31U, (uint8_t)0x83U, (uint8_t)0xF8U,
-    (uint8_t)0x1DU, (uint8_t)0x4AU, (uint8_t)0x3FU, (uint8_t)0x2FU, (uint8_t)0xA4U, (uint8_t)0x57U,
-    (uint8_t)0x1EU, (uint8_t)0xFCU, (uint8_t)0x8CU, (uint8_t)0xE0U, (uint8_t)0xBAU, (uint8_t)0x8AU,
-    (uint8_t)0x4FU, (uint8_t)0xE8U, (uint8_t)0xB6U, (uint8_t)0x85U, (uint8_t)0x5DU, (uint8_t)0xFEU,
-    (uint8_t)0x72U, (uint8_t)0xB0U, (uint8_t)0xA6U, (uint8_t)0x6EU, (uint8_t)0xDEU, (uint8_t)0xD2U,
-    (uint8_t)0xFBU, (uint8_t)0xABU, (uint8_t)0xFBU, (uint8_t)0xE5U, (uint8_t)0x8AU, (uint8_t)0x30U,
-    (uint8_t)0xFAU, (uint8_t)0xFAU, (uint8_t)0xBEU, (uint8_t)0x1CU, (uint8_t)0x5DU, (uint8_t)0x71U,
-    (uint8_t)0xA8U, (uint8_t)0x7EU, (uint8_t)0x2FU, (uint8_t)0x74U, (uint8_t)0x1EU, (uint8_t)0xF8U,
-    (uint8_t)0xC1U, (uint8_t)0xFEU, (uint8_t)0x86U, (uint8_t)0xFEU, (uint8_t)0xA6U, (uint8_t)0xBBU,
-    (uint8_t)0xFDU, (uint8_t)0xE5U, (uint8_t)0x30U, (uint8_t)0x67U, (uint8_t)0x7FU, (uint8_t)0x0DU,
-    (uint8_t)0x97U, (uint8_t)0xD1U, (uint8_t)0x1DU, (uint8_t)0x49U, (uint8_t)0xF7U, (uint8_t)0xA8U,
-    (uint8_t)0x44U, (uint8_t)0x3DU, (uint8_t)0x08U, (uint8_t)0x22U, (uint8_t)0xE5U, (uint8_t)0x06U,
-    (uint8_t)0xA9U, (uint8_t)0xF4U, (uint8_t)0x61U, (uint8_t)0x4EU, (uint8_t)0x01U, (uint8_t)0x1EU,
-    (uint8_t)0x2AU, (uint8_t)0x94U, (uint8_t)0x83U, (uint8_t)0x8FU, (uint8_t)0xF8U, (uint8_t)0x8CU,
-    (uint8_t)0xD6U, (uint8_t)0x8CU, (uint8_t)0x8BU, (uint8_t)0xB7U, (uint8_t)0xC5U, (uint8_t)0xC6U,
-    (uint8_t)0x42U, (uint8_t)0x4CU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0x9EU, 0x1EU, 0xF1U, 0x6EU,
+    0x6FU, 0x52U, 0xC3U, 0x16U, 0x4DU, 0xF4U, 0xFBU, 0x79U, 0x30U, 0xE9U, 0xE4U, 0xE5U, 0x88U,
+    0x57U, 0xB6U, 0xACU, 0x7DU, 0x5FU, 0x42U, 0xD6U, 0x9FU, 0x6DU, 0x18U, 0x77U, 0x63U, 0xCFU,
+    0x1DU, 0x55U, 0x03U, 0x40U, 0x04U, 0x87U, 0xF5U, 0x5BU, 0xA5U, 0x7EU, 0x31U, 0xCCU, 0x7AU,
+    0x71U, 0x35U, 0xC8U, 0x86U, 0xEFU, 0xB4U, 0x31U, 0x8AU, 0xEDU, 0x6AU, 0x1EU, 0x01U, 0x2DU,
+    0x9EU, 0x68U, 0x32U, 0xA9U, 0x07U, 0x60U, 0x0AU, 0x91U, 0x81U, 0x30U, 0xC4U, 0x6DU, 0xC7U,
+    0x78U, 0xF9U, 0x71U, 0xADU, 0x00U, 0x38U, 0x09U, 0x29U, 0x99U, 0xA3U, 0x33U, 0xCBU, 0x8BU,
+    0x7AU, 0x1AU, 0x1DU, 0xB9U, 0x3DU, 0x71U, 0x40U, 0x00U, 0x3CU, 0x2AU, 0x4EU, 0xCEU, 0xA9U,
+    0xF9U, 0x8DU, 0x0AU, 0xCCU, 0x0AU, 0x82U, 0x91U, 0xCDU, 0xCEU, 0xC9U, 0x7DU, 0xCFU, 0x8EU,
+    0xC9U, 0xB5U, 0x5AU, 0x7FU, 0x88U, 0xA4U, 0x6BU, 0x4DU, 0xB5U, 0xA8U, 0x51U, 0xF4U, 0x41U,
+    0x82U, 0xE1U, 0xC6U, 0x8AU, 0x00U, 0x7EU, 0x5EU, 0x0DU, 0xD9U, 0x02U, 0x0BU, 0xFDU, 0x64U,
+    0xB6U, 0x45U, 0x03U, 0x6CU, 0x7AU, 0x4EU, 0x67U, 0x7DU, 0x2CU, 0x38U, 0x53U, 0x2AU, 0x3AU,
+    0x23U, 0xBAU, 0x44U, 0x42U, 0xCAU, 0xF5U, 0x3EU, 0xA6U, 0x3BU, 0xB4U, 0x54U, 0x32U, 0x9BU,
+    0x76U, 0x24U, 0xC8U, 0x91U, 0x7BU, 0xDDU, 0x64U, 0xB1U, 0xC0U, 0xFDU, 0x4CU, 0xB3U, 0x8EU,
+    0x8CU, 0x33U, 0x4CU, 0x70U, 0x1CU, 0x3AU, 0xCDU, 0xADU, 0x06U, 0x57U, 0xFCU, 0xCFU, 0xECU,
+    0x71U, 0x9BU, 0x1FU, 0x5CU, 0x3EU, 0x4EU, 0x46U, 0x04U, 0x1FU, 0x38U, 0x81U, 0x47U, 0xFBU,
+    0x4CU, 0xFDU, 0xB4U, 0x77U, 0xA5U, 0x24U, 0x71U, 0xF7U, 0xA9U, 0xA9U, 0x69U, 0x10U, 0xB8U,
+    0x55U, 0x32U, 0x2EU, 0xDBU, 0x63U, 0x40U, 0xD8U, 0xA0U, 0x0EU, 0xF0U, 0x92U, 0x35U, 0x05U,
+    0x11U, 0xE3U, 0x0AU, 0xBEU, 0xC1U, 0xFFU, 0xF9U, 0xE3U, 0xA2U, 0x6EU, 0x7FU, 0xB2U, 0x9FU,
+    0x8CU, 0x18U, 0x30U, 0x23U, 0xC3U, 0x58U, 0x7EU, 0x38U, 0xDAU, 0x00U, 0x77U, 0xD9U, 0xB4U,
+    0x76U, 0x3EU, 0x4EU, 0x4BU, 0x94U, 0xB2U, 0xBBU, 0xC1U, 0x94U, 0xC6U, 0x65U, 0x1EU, 0x77U,
+    0xCAU, 0xF9U, 0x92U, 0xEEU, 0xAAU, 0xC0U, 0x23U, 0x2AU, 0x28U, 0x1BU, 0xF6U, 0xB3U, 0xA7U,
+    0x39U, 0xC1U, 0x22U, 0x61U, 0x16U, 0x82U, 0x0AU, 0xE8U, 0xDBU, 0x58U, 0x47U, 0xA6U, 0x7CU,
+    0xBEU, 0xF9U, 0xC9U, 0x09U, 0x1BU, 0x46U, 0x2DU, 0x53U, 0x8CU, 0xD7U, 0x2BU, 0x03U, 0x74U,
+    0x6AU, 0xE7U, 0x7FU, 0x5EU, 0x62U, 0x29U, 0x2CU, 0x31U, 0x15U, 0x62U, 0xA8U, 0x46U, 0x50U,
+    0x5DU, 0xC8U, 0x2DU, 0xB8U, 0x54U, 0x33U, 0x8AU, 0xE4U, 0x9FU, 0x52U, 0x35U, 0xC9U, 0x5BU,
+    0x91U, 0x17U, 0x8CU, 0xCFU, 0x2DU, 0xD5U, 0xCAU, 0xCEU, 0xF4U, 0x03U, 0xECU, 0x9DU, 0x18U,
+    0x10U, 0xC6U, 0x27U, 0x2BU, 0x04U, 0x5BU, 0x3BU, 0x71U, 0xF9U, 0xDCU, 0x6BU, 0x80U, 0xD6U,
+    0x3FU, 0xDDU, 0x4AU, 0x8EU, 0x9AU, 0xDBU, 0x1EU, 0x69U, 0x62U, 0xA6U, 0x95U, 0x26U, 0xD4U,
+    0x31U, 0x61U, 0xC1U, 0xA4U, 0x1DU, 0x57U, 0x0DU, 0x79U, 0x38U, 0xDAU, 0xD4U, 0xA4U, 0x0EU,
+    0x32U, 0x9CU, 0xCFU, 0xF4U, 0x6AU, 0xAAU, 0x36U, 0xADU, 0x00U, 0x4CU, 0xF6U, 0x00U, 0xC8U,
+    0x38U, 0x1EU, 0x42U, 0x5AU, 0x31U, 0xD9U, 0x51U, 0xAEU, 0x64U, 0xFDU, 0xB2U, 0x3FU, 0xCEU,
+    0xC9U, 0x50U, 0x9DU, 0x43U, 0x68U, 0x7FU, 0xEBU, 0x69U, 0xEDU, 0xD1U, 0xCCU, 0x5EU, 0x0BU,
+    0x8CU, 0xC3U, 0xBDU, 0xF6U, 0x4BU, 0x10U, 0xEFU, 0x86U, 0xB6U, 0x31U, 0x42U, 0xA3U, 0xABU,
+    0x88U, 0x29U, 0x55U, 0x5BU, 0x2FU, 0x74U, 0x7CU, 0x93U, 0x26U, 0x65U, 0xCBU, 0x2CU, 0x0FU,
+    0x1CU, 0xC0U, 0x1BU, 0xD7U, 0x02U, 0x29U, 0x38U, 0x88U, 0x39U, 0xD2U, 0xAFU, 0x05U, 0xE4U,
+    0x54U, 0x50U, 0x4AU, 0xC7U, 0x8BU, 0x75U, 0x82U, 0x82U, 0x28U, 0x46U, 0xC0U, 0xBAU, 0x35U,
+    0xC3U, 0x5FU, 0x5CU, 0x59U, 0x16U, 0x0CU, 0xC0U, 0x46U, 0xFDU, 0x82U, 0x51U, 0x54U, 0x1FU,
+    0xC6U, 0x8CU, 0x9CU, 0x86U, 0xB0U, 0x22U, 0xBBU, 0x70U, 0x99U, 0x87U, 0x6AU, 0x46U, 0x0EU,
+    0x74U, 0x51U, 0xA8U, 0xA9U, 0x31U, 0x09U, 0x70U, 0x3FU, 0xEEU, 0x1CU, 0x21U, 0x7EU, 0x6CU,
+    0x38U, 0x26U, 0xE5U, 0x2CU, 0x51U, 0xAAU, 0x69U, 0x1EU, 0x0EU, 0x42U, 0x3CU, 0xFCU, 0x99U,
+    0xE9U, 0xE3U, 0x16U, 0x50U, 0xC1U, 0x21U, 0x7BU, 0x62U, 0x48U, 0x16U, 0xCDU, 0xADU, 0x9AU,
+    0x95U, 0xF9U, 0xD5U, 0xB8U, 0x01U, 0x94U, 0x88U, 0xD9U, 0xC0U, 0xA0U, 0xA1U, 0xFEU, 0x30U,
+    0x75U, 0xA5U, 0x77U, 0xE2U, 0x31U, 0x83U, 0xF8U, 0x1DU, 0x4AU, 0x3FU, 0x2FU, 0xA4U, 0x57U,
+    0x1EU, 0xFCU, 0x8CU, 0xE0U, 0xBAU, 0x8AU, 0x4FU, 0xE8U, 0xB6U, 0x85U, 0x5DU, 0xFEU, 0x72U,
+    0xB0U, 0xA6U, 0x6EU, 0xDEU, 0xD2U, 0xFBU, 0xABU, 0xFBU, 0xE5U, 0x8AU, 0x30U, 0xFAU, 0xFAU,
+    0xBEU, 0x1CU, 0x5DU, 0x71U, 0xA8U, 0x7EU, 0x2FU, 0x74U, 0x1EU, 0xF8U, 0xC1U, 0xFEU, 0x86U,
+    0xFEU, 0xA6U, 0xBBU, 0xFDU, 0xE5U, 0x30U, 0x67U, 0x7FU, 0x0DU, 0x97U, 0xD1U, 0x1DU, 0x49U,
+    0xF7U, 0xA8U, 0x44U, 0x3DU, 0x08U, 0x22U, 0xE5U, 0x06U, 0xA9U, 0xF4U, 0x61U, 0x4EU, 0x01U,
+    0x1EU, 0x2AU, 0x94U, 0x83U, 0x8FU, 0xF8U, 0x8CU, 0xD6U, 0x8CU, 0x8BU, 0xB7U, 0xC5U, 0xC6U,
+    0x42U, 0x4CU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_K256_PrecompTable.h b/include/internal/Hacl_K256_PrecompTable.h
index 26bdfa1f..ff15f1c9 100644
--- a/include/internal/Hacl_K256_PrecompTable.h
+++ b/include/internal/Hacl_K256_PrecompTable.h
@@ -39,498 +39,378 @@ static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_basepoint_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)705178180786072U,
-    (uint64_t)3855836460717471U, (uint64_t)4089131105950716U, (uint64_t)3301581525494108U,
-    (uint64_t)133858670344668U, (uint64_t)2199641648059576U, (uint64_t)1278080618437060U,
-    (uint64_t)3959378566518708U, (uint64_t)3455034269351872U, (uint64_t)79417610544803U,
-    (uint64_t)1U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)1282049064345544U, (uint64_t)971732600440099U, (uint64_t)1014594595727339U,
-    (uint64_t)4392159187541980U, (uint64_t)268327875692285U, (uint64_t)2411661712280539U,
-    (uint64_t)1092576199280126U, (uint64_t)4328619610718051U, (uint64_t)3535440816471627U,
-    (uint64_t)95182251488556U, (uint64_t)1893725512243753U, (uint64_t)3619861457111820U,
-    (uint64_t)879374960417905U, (uint64_t)2868056058129113U, (uint64_t)273195291893682U,
-    (uint64_t)2044797305960112U, (uint64_t)2357106853933780U, (uint64_t)3563112438336058U,
-    (uint64_t)2430811541762558U, (uint64_t)106443809495428U, (uint64_t)2231357633909668U,
-    (uint64_t)3641705835951936U, (uint64_t)80642569314189U, (uint64_t)2254841882373268U,
-    (uint64_t)149848031966573U, (uint64_t)2304615661367764U, (uint64_t)2410957403736446U,
-    (uint64_t)2712754805859804U, (uint64_t)2440183877540536U, (uint64_t)99784623895865U,
-    (uint64_t)3667773127482758U, (uint64_t)1354899394473308U, (uint64_t)3636602998800808U,
-    (uint64_t)2709296679846364U, (uint64_t)7253362091963U, (uint64_t)3585950735562744U,
-    (uint64_t)935775991758415U, (uint64_t)4108078106735201U, (uint64_t)556081800336307U,
-    (uint64_t)229585977163057U, (uint64_t)4055594186679801U, (uint64_t)1767681004944933U,
-    (uint64_t)1432634922083242U, (uint64_t)534935602949197U, (uint64_t)251753159522567U,
-    (uint64_t)2846474078499321U, (uint64_t)4488649590348702U, (uint64_t)2437476916025038U,
-    (uint64_t)3040577412822874U, (uint64_t)79405234918614U, (uint64_t)3030621226551508U,
-    (uint64_t)2801117003929806U, (uint64_t)1642927515498422U, (uint64_t)2802725079726297U,
-    (uint64_t)8472780626107U, (uint64_t)866068070352655U, (uint64_t)188080768545106U,
-    (uint64_t)2152119998903058U, (uint64_t)3391239985029665U, (uint64_t)23820026013564U,
-    (uint64_t)2965064154891949U, (uint64_t)1846516097921398U, (uint64_t)4418379948133146U,
-    (uint64_t)3137755426942400U, (uint64_t)47705291301781U, (uint64_t)4278533051105665U,
-    (uint64_t)3453643211214931U, (uint64_t)3379734319145156U, (uint64_t)3762442192097039U,
-    (uint64_t)40243003528694U, (uint64_t)4063448994211201U, (uint64_t)5697015368785U,
-    (uint64_t)1006545411838613U, (uint64_t)4242291693755210U, (uint64_t)135184629190512U,
-    (uint64_t)264898689131035U, (uint64_t)611796474823597U, (uint64_t)3255382250029089U,
-    (uint64_t)3490429246984696U, (uint64_t)236558595864362U, (uint64_t)2055934691551704U,
-    (uint64_t)1487711670114502U, (uint64_t)1823930698221632U, (uint64_t)2130937287438472U,
-    (uint64_t)154610053389779U, (uint64_t)2746573287023216U, (uint64_t)2430987262221221U,
-    (uint64_t)1668741642878689U, (uint64_t)904982541243977U, (uint64_t)56087343124948U,
-    (uint64_t)393905062353536U, (uint64_t)412681877350188U, (uint64_t)3153602040979977U,
-    (uint64_t)4466820876224989U, (uint64_t)146579165617857U, (uint64_t)2628741216508991U,
-    (uint64_t)747994231529806U, (uint64_t)750506569317681U, (uint64_t)1887492790748779U,
-    (uint64_t)35259008682771U, (uint64_t)2085116434894208U, (uint64_t)543291398921711U,
-    (uint64_t)1144362007901552U, (uint64_t)679305136036846U, (uint64_t)141090902244489U,
-    (uint64_t)632480954474859U, (uint64_t)2384513102652591U, (uint64_t)2225529790159790U,
-    (uint64_t)692258664851625U, (uint64_t)198681843567699U, (uint64_t)2397092587228181U,
-    (uint64_t)145862822166614U, (uint64_t)196976540479452U, (uint64_t)3321831130141455U,
-    (uint64_t)69266673089832U, (uint64_t)4469644227342284U, (uint64_t)3899271145504796U,
-    (uint64_t)1261890974076660U, (uint64_t)525357673886694U, (uint64_t)182135997828583U,
-    (uint64_t)4292760618810332U, (uint64_t)3404186545541683U, (uint64_t)312297386688768U,
-    (uint64_t)204377466824608U, (uint64_t)230900767857952U, (uint64_t)3871485172339693U,
-    (uint64_t)779449329662955U, (uint64_t)978655822464694U, (uint64_t)2278252139594027U,
-    (uint64_t)104641527040382U, (uint64_t)3528840153625765U, (uint64_t)4484699080275273U,
-    (uint64_t)1463971951102316U, (uint64_t)4013910812844749U, (uint64_t)228915589433620U,
-    (uint64_t)1209641433482461U, (uint64_t)4043178788774759U, (uint64_t)3008668238856634U,
-    (uint64_t)1448425089071412U, (uint64_t)26269719725037U, (uint64_t)3330785027545223U,
-    (uint64_t)852657975349259U, (uint64_t)227245054466105U, (uint64_t)1534632353984777U,
-    (uint64_t)207715098574660U, (uint64_t)3209837527352280U, (uint64_t)4051688046309066U,
-    (uint64_t)3839009590725955U, (uint64_t)1321506437398842U, (uint64_t)68340219159928U,
-    (uint64_t)1806950276956275U, (uint64_t)3923908055275295U, (uint64_t)743963253393575U,
-    (uint64_t)42162407478783U, (uint64_t)261334584474610U, (uint64_t)3728224928885214U,
-    (uint64_t)4004701081842869U, (uint64_t)709043201644674U, (uint64_t)4267294249150171U,
-    (uint64_t)255540582975025U, (uint64_t)875490593722211U, (uint64_t)796393708218375U,
-    (uint64_t)14774425627956U, (uint64_t)1500040516752097U, (uint64_t)141076627721678U,
-    (uint64_t)2634539368480628U, (uint64_t)1106488853550103U, (uint64_t)2346231921151930U,
-    (uint64_t)897108283954283U, (uint64_t)64616679559843U, (uint64_t)400244949840943U,
-    (uint64_t)1731263826831733U, (uint64_t)1649996579904651U, (uint64_t)3643693449640761U,
-    (uint64_t)172543068638991U, (uint64_t)329537981097182U, (uint64_t)2029799860802869U,
-    (uint64_t)4377737515208862U, (uint64_t)29103311051334U, (uint64_t)265583594111499U,
-    (uint64_t)3798074876561255U, (uint64_t)184749333259352U, (uint64_t)3117395073661801U,
-    (uint64_t)3695784565008833U, (uint64_t)64282709896721U, (uint64_t)1618968913246422U,
-    (uint64_t)3185235128095257U, (uint64_t)3288745068118692U, (uint64_t)1963818603508782U,
-    (uint64_t)281054350739495U, (uint64_t)1658639050810346U, (uint64_t)3061097601679552U,
-    (uint64_t)3023781433263746U, (uint64_t)2770283391242475U, (uint64_t)144508864751908U,
-    (uint64_t)173576288079856U, (uint64_t)46114579547054U, (uint64_t)1679480127300211U,
-    (uint64_t)1683062051644007U, (uint64_t)117183826129323U, (uint64_t)1894068608117440U,
-    (uint64_t)3846899838975733U, (uint64_t)4289279019496192U, (uint64_t)176995887914031U,
-    (uint64_t)78074942938713U, (uint64_t)454207263265292U, (uint64_t)972683614054061U,
-    (uint64_t)808474205144361U, (uint64_t)942703935951735U, (uint64_t)134460241077887U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    705178180786072ULL, 3855836460717471ULL, 4089131105950716ULL, 3301581525494108ULL,
+    133858670344668ULL, 2199641648059576ULL, 1278080618437060ULL, 3959378566518708ULL,
+    3455034269351872ULL, 79417610544803ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1282049064345544ULL,
+    971732600440099ULL, 1014594595727339ULL, 4392159187541980ULL, 268327875692285ULL,
+    2411661712280539ULL, 1092576199280126ULL, 4328619610718051ULL, 3535440816471627ULL,
+    95182251488556ULL, 1893725512243753ULL, 3619861457111820ULL, 879374960417905ULL,
+    2868056058129113ULL, 273195291893682ULL, 2044797305960112ULL, 2357106853933780ULL,
+    3563112438336058ULL, 2430811541762558ULL, 106443809495428ULL, 2231357633909668ULL,
+    3641705835951936ULL, 80642569314189ULL, 2254841882373268ULL, 149848031966573ULL,
+    2304615661367764ULL, 2410957403736446ULL, 2712754805859804ULL, 2440183877540536ULL,
+    99784623895865ULL, 3667773127482758ULL, 1354899394473308ULL, 3636602998800808ULL,
+    2709296679846364ULL, 7253362091963ULL, 3585950735562744ULL, 935775991758415ULL,
+    4108078106735201ULL, 556081800336307ULL, 229585977163057ULL, 4055594186679801ULL,
+    1767681004944933ULL, 1432634922083242ULL, 534935602949197ULL, 251753159522567ULL,
+    2846474078499321ULL, 4488649590348702ULL, 2437476916025038ULL, 3040577412822874ULL,
+    79405234918614ULL, 3030621226551508ULL, 2801117003929806ULL, 1642927515498422ULL,
+    2802725079726297ULL, 8472780626107ULL, 866068070352655ULL, 188080768545106ULL,
+    2152119998903058ULL, 3391239985029665ULL, 23820026013564ULL, 2965064154891949ULL,
+    1846516097921398ULL, 4418379948133146ULL, 3137755426942400ULL, 47705291301781ULL,
+    4278533051105665ULL, 3453643211214931ULL, 3379734319145156ULL, 3762442192097039ULL,
+    40243003528694ULL, 4063448994211201ULL, 5697015368785ULL, 1006545411838613ULL,
+    4242291693755210ULL, 135184629190512ULL, 264898689131035ULL, 611796474823597ULL,
+    3255382250029089ULL, 3490429246984696ULL, 236558595864362ULL, 2055934691551704ULL,
+    1487711670114502ULL, 1823930698221632ULL, 2130937287438472ULL, 154610053389779ULL,
+    2746573287023216ULL, 2430987262221221ULL, 1668741642878689ULL, 904982541243977ULL,
+    56087343124948ULL, 393905062353536ULL, 412681877350188ULL, 3153602040979977ULL,
+    4466820876224989ULL, 146579165617857ULL, 2628741216508991ULL, 747994231529806ULL,
+    750506569317681ULL, 1887492790748779ULL, 35259008682771ULL, 2085116434894208ULL,
+    543291398921711ULL, 1144362007901552ULL, 679305136036846ULL, 141090902244489ULL,
+    632480954474859ULL, 2384513102652591ULL, 2225529790159790ULL, 692258664851625ULL,
+    198681843567699ULL, 2397092587228181ULL, 145862822166614ULL, 196976540479452ULL,
+    3321831130141455ULL, 69266673089832ULL, 4469644227342284ULL, 3899271145504796ULL,
+    1261890974076660ULL, 525357673886694ULL, 182135997828583ULL, 4292760618810332ULL,
+    3404186545541683ULL, 312297386688768ULL, 204377466824608ULL, 230900767857952ULL,
+    3871485172339693ULL, 779449329662955ULL, 978655822464694ULL, 2278252139594027ULL,
+    104641527040382ULL, 3528840153625765ULL, 4484699080275273ULL, 1463971951102316ULL,
+    4013910812844749ULL, 228915589433620ULL, 1209641433482461ULL, 4043178788774759ULL,
+    3008668238856634ULL, 1448425089071412ULL, 26269719725037ULL, 3330785027545223ULL,
+    852657975349259ULL, 227245054466105ULL, 1534632353984777ULL, 207715098574660ULL,
+    3209837527352280ULL, 4051688046309066ULL, 3839009590725955ULL, 1321506437398842ULL,
+    68340219159928ULL, 1806950276956275ULL, 3923908055275295ULL, 743963253393575ULL,
+    42162407478783ULL, 261334584474610ULL, 3728224928885214ULL, 4004701081842869ULL,
+    709043201644674ULL, 4267294249150171ULL, 255540582975025ULL, 875490593722211ULL,
+    796393708218375ULL, 14774425627956ULL, 1500040516752097ULL, 141076627721678ULL,
+    2634539368480628ULL, 1106488853550103ULL, 2346231921151930ULL, 897108283954283ULL,
+    64616679559843ULL, 400244949840943ULL, 1731263826831733ULL, 1649996579904651ULL,
+    3643693449640761ULL, 172543068638991ULL, 329537981097182ULL, 2029799860802869ULL,
+    4377737515208862ULL, 29103311051334ULL, 265583594111499ULL, 3798074876561255ULL,
+    184749333259352ULL, 3117395073661801ULL, 3695784565008833ULL, 64282709896721ULL,
+    1618968913246422ULL, 3185235128095257ULL, 3288745068118692ULL, 1963818603508782ULL,
+    281054350739495ULL, 1658639050810346ULL, 3061097601679552ULL, 3023781433263746ULL,
+    2770283391242475ULL, 144508864751908ULL, 173576288079856ULL, 46114579547054ULL,
+    1679480127300211ULL, 1683062051644007ULL, 117183826129323ULL, 1894068608117440ULL,
+    3846899838975733ULL, 4289279019496192ULL, 176995887914031ULL, 78074942938713ULL,
+    454207263265292ULL, 972683614054061ULL, 808474205144361ULL, 942703935951735ULL,
+    134460241077887ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_g_pow2_64_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)4496295042185355U,
-    (uint64_t)3125448202219451U, (uint64_t)1239608518490046U, (uint64_t)2687445637493112U,
-    (uint64_t)77979604880139U, (uint64_t)3360310474215011U, (uint64_t)1216410458165163U,
-    (uint64_t)177901593587973U, (uint64_t)3209978938104985U, (uint64_t)118285133003718U,
-    (uint64_t)434519962075150U, (uint64_t)1114612377498854U, (uint64_t)3488596944003813U,
-    (uint64_t)450716531072892U, (uint64_t)66044973203836U, (uint64_t)2822827191156652U,
-    (uint64_t)2417714248626059U, (uint64_t)2173117567943U, (uint64_t)961513119252459U,
-    (uint64_t)233852556538333U, (uint64_t)3014783730323962U, (uint64_t)2955192634004574U,
-    (uint64_t)580546524951282U, (uint64_t)2982973948711252U, (uint64_t)226295722018730U,
-    (uint64_t)26457116218543U, (uint64_t)3401523493637663U, (uint64_t)2597746825024790U,
-    (uint64_t)1789211180483113U, (uint64_t)155862365823427U, (uint64_t)4056806876632134U,
-    (uint64_t)1742291745730568U, (uint64_t)3527759000626890U, (uint64_t)3740578471192596U,
-    (uint64_t)177295097700537U, (uint64_t)1533961415657770U, (uint64_t)4305228982382487U,
-    (uint64_t)4069090871282711U, (uint64_t)4090877481646667U, (uint64_t)220939617041498U,
-    (uint64_t)2057548127959588U, (uint64_t)45185623103252U, (uint64_t)2871963270423449U,
-    (uint64_t)3312974792248749U, (uint64_t)8710601879528U, (uint64_t)570612225194540U,
-    (uint64_t)2045632925323972U, (uint64_t)1263913878297555U, (uint64_t)1294592284757719U,
-    (uint64_t)238067747295054U, (uint64_t)1576659948829386U, (uint64_t)2315159636629917U,
-    (uint64_t)3624867787891655U, (uint64_t)647628266663887U, (uint64_t)75788399640253U,
-    (uint64_t)710811707847797U, (uint64_t)130020650130128U, (uint64_t)1975045425972589U,
-    (uint64_t)136351545314094U, (uint64_t)229292031212337U, (uint64_t)1061471455264148U,
-    (uint64_t)3281312694184822U, (uint64_t)1692442293921797U, (uint64_t)4171008525509513U,
-    (uint64_t)275424696197549U, (uint64_t)1170296303921965U, (uint64_t)4154092952807735U,
-    (uint64_t)4371262070870741U, (uint64_t)835769811036496U, (uint64_t)275812646528189U,
-    (uint64_t)4006745785521764U, (uint64_t)1965172239781114U, (uint64_t)4121055644916429U,
-    (uint64_t)3578995380229569U, (uint64_t)169798870760022U, (uint64_t)1834234783016431U,
-    (uint64_t)3186919121688538U, (uint64_t)1894269993170652U, (uint64_t)868603832348691U,
-    (uint64_t)110978471368876U, (uint64_t)1659296605881532U, (uint64_t)3257830829309297U,
-    (uint64_t)3381509832701119U, (uint64_t)4016163121121296U, (uint64_t)265240263496294U,
-    (uint64_t)4411285343933251U, (uint64_t)728746770806400U, (uint64_t)1767819098558739U,
-    (uint64_t)3002081480892841U, (uint64_t)96312133241935U, (uint64_t)468184501392107U,
-    (uint64_t)2061529496271208U, (uint64_t)801565111628867U, (uint64_t)3380678576799273U,
-    (uint64_t)121814978170941U, (uint64_t)3340363319165433U, (uint64_t)2764604325746928U,
-    (uint64_t)4475755976431968U, (uint64_t)3678073419927081U, (uint64_t)237001357924061U,
-    (uint64_t)4110487014553450U, (uint64_t)442517757833404U, (uint64_t)3976758767423859U,
-    (uint64_t)2559863799262476U, (uint64_t)178144664279213U, (uint64_t)2488702171798051U,
-    (uint64_t)4292079598620208U, (uint64_t)1642918280217329U, (uint64_t)3694920319798108U,
-    (uint64_t)111735528281657U, (uint64_t)2904433967156033U, (uint64_t)4391518032143166U,
-    (uint64_t)3018885875516259U, (uint64_t)3730342681447122U, (uint64_t)10320273322750U,
-    (uint64_t)555845881555519U, (uint64_t)58355404017985U, (uint64_t)379009359053696U,
-    (uint64_t)450317203955503U, (uint64_t)271063299686173U, (uint64_t)910340241794202U,
-    (uint64_t)4145234574853890U, (uint64_t)2059755654702755U, (uint64_t)626530377112246U,
-    (uint64_t)188918989156857U, (uint64_t)3316657461542117U, (uint64_t)778033563170765U,
-    (uint64_t)3568562306532187U, (uint64_t)2888619469733481U, (uint64_t)4364919962337U,
-    (uint64_t)4095057288587059U, (uint64_t)2275461355379988U, (uint64_t)1507422995910897U,
-    (uint64_t)3737691697116252U, (uint64_t)28779913258578U, (uint64_t)131453301647952U,
-    (uint64_t)3613515597508469U, (uint64_t)2389606941441321U, (uint64_t)2135459302594806U,
-    (uint64_t)105517262484263U, (uint64_t)2973432939331401U, (uint64_t)3447096622477885U,
-    (uint64_t)684654106536844U, (uint64_t)2815198316729695U, (uint64_t)280303067216071U,
-    (uint64_t)1841014812927024U, (uint64_t)1181026273060917U, (uint64_t)4092989148457730U,
-    (uint64_t)1381045116206278U, (uint64_t)112475725893965U, (uint64_t)2309144740156686U,
-    (uint64_t)1558825847609352U, (uint64_t)2008068002046292U, (uint64_t)3153511625856423U,
-    (uint64_t)38469701427673U, (uint64_t)4240572315518056U, (uint64_t)2295170987320580U,
-    (uint64_t)187734093837094U, (uint64_t)301041528077172U, (uint64_t)234553141005715U,
-    (uint64_t)4170513699279606U, (uint64_t)1600132848196146U, (uint64_t)3149113064155689U,
-    (uint64_t)2733255352600949U, (uint64_t)144915931419495U, (uint64_t)1221012073888926U,
-    (uint64_t)4395668111081710U, (uint64_t)2464799161496070U, (uint64_t)3664256125241313U,
-    (uint64_t)239705368981290U, (uint64_t)1415181408539490U, (uint64_t)2551836620449074U,
-    (uint64_t)3003106895689578U, (uint64_t)968947218886924U, (uint64_t)270781532362673U,
-    (uint64_t)2905980714350372U, (uint64_t)3246927349288975U, (uint64_t)2653377642686974U,
-    (uint64_t)1577457093418263U, (uint64_t)279488238785848U, (uint64_t)568335962564552U,
-    (uint64_t)4251365041645758U, (uint64_t)1257832559776007U, (uint64_t)2424022444243863U,
-    (uint64_t)261166122046343U, (uint64_t)4399874608082116U, (uint64_t)640509987891568U,
-    (uint64_t)3119706885332220U, (uint64_t)1990185416694007U, (uint64_t)119390098529341U,
-    (uint64_t)220106534694050U, (uint64_t)937225880034895U, (uint64_t)656288151358882U,
-    (uint64_t)1766967254772100U, (uint64_t)197900790969750U, (uint64_t)2992539221608875U,
-    (uint64_t)3960297171111858U, (uint64_t)3499202002925081U, (uint64_t)1103060980924705U,
-    (uint64_t)13670895919578U, (uint64_t)430132744187721U, (uint64_t)1206771838050953U,
-    (uint64_t)2474749300167198U, (uint64_t)296299539510780U, (uint64_t)61565517686436U,
-    (uint64_t)752778559080573U, (uint64_t)3049015829565410U, (uint64_t)3538647632527371U,
-    (uint64_t)1640473028662032U, (uint64_t)182488721849306U, (uint64_t)1234378482161516U,
-    (uint64_t)3736205988606381U, (uint64_t)2814216844344487U, (uint64_t)3877249891529557U,
-    (uint64_t)51681412928433U, (uint64_t)4275336620301239U, (uint64_t)3084074032750651U,
-    (uint64_t)42732308350456U, (uint64_t)3648603591552229U, (uint64_t)142450621701603U,
-    (uint64_t)4020045475009854U, (uint64_t)1050293952073054U, (uint64_t)1974773673079851U,
-    (uint64_t)1815515638724020U, (uint64_t)104845375825434U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    4496295042185355ULL, 3125448202219451ULL, 1239608518490046ULL, 2687445637493112ULL,
+    77979604880139ULL, 3360310474215011ULL, 1216410458165163ULL, 177901593587973ULL,
+    3209978938104985ULL, 118285133003718ULL, 434519962075150ULL, 1114612377498854ULL,
+    3488596944003813ULL, 450716531072892ULL, 66044973203836ULL, 2822827191156652ULL,
+    2417714248626059ULL, 2173117567943ULL, 961513119252459ULL, 233852556538333ULL,
+    3014783730323962ULL, 2955192634004574ULL, 580546524951282ULL, 2982973948711252ULL,
+    226295722018730ULL, 26457116218543ULL, 3401523493637663ULL, 2597746825024790ULL,
+    1789211180483113ULL, 155862365823427ULL, 4056806876632134ULL, 1742291745730568ULL,
+    3527759000626890ULL, 3740578471192596ULL, 177295097700537ULL, 1533961415657770ULL,
+    4305228982382487ULL, 4069090871282711ULL, 4090877481646667ULL, 220939617041498ULL,
+    2057548127959588ULL, 45185623103252ULL, 2871963270423449ULL, 3312974792248749ULL,
+    8710601879528ULL, 570612225194540ULL, 2045632925323972ULL, 1263913878297555ULL,
+    1294592284757719ULL, 238067747295054ULL, 1576659948829386ULL, 2315159636629917ULL,
+    3624867787891655ULL, 647628266663887ULL, 75788399640253ULL, 710811707847797ULL,
+    130020650130128ULL, 1975045425972589ULL, 136351545314094ULL, 229292031212337ULL,
+    1061471455264148ULL, 3281312694184822ULL, 1692442293921797ULL, 4171008525509513ULL,
+    275424696197549ULL, 1170296303921965ULL, 4154092952807735ULL, 4371262070870741ULL,
+    835769811036496ULL, 275812646528189ULL, 4006745785521764ULL, 1965172239781114ULL,
+    4121055644916429ULL, 3578995380229569ULL, 169798870760022ULL, 1834234783016431ULL,
+    3186919121688538ULL, 1894269993170652ULL, 868603832348691ULL, 110978471368876ULL,
+    1659296605881532ULL, 3257830829309297ULL, 3381509832701119ULL, 4016163121121296ULL,
+    265240263496294ULL, 4411285343933251ULL, 728746770806400ULL, 1767819098558739ULL,
+    3002081480892841ULL, 96312133241935ULL, 468184501392107ULL, 2061529496271208ULL,
+    801565111628867ULL, 3380678576799273ULL, 121814978170941ULL, 3340363319165433ULL,
+    2764604325746928ULL, 4475755976431968ULL, 3678073419927081ULL, 237001357924061ULL,
+    4110487014553450ULL, 442517757833404ULL, 3976758767423859ULL, 2559863799262476ULL,
+    178144664279213ULL, 2488702171798051ULL, 4292079598620208ULL, 1642918280217329ULL,
+    3694920319798108ULL, 111735528281657ULL, 2904433967156033ULL, 4391518032143166ULL,
+    3018885875516259ULL, 3730342681447122ULL, 10320273322750ULL, 555845881555519ULL,
+    58355404017985ULL, 379009359053696ULL, 450317203955503ULL, 271063299686173ULL,
+    910340241794202ULL, 4145234574853890ULL, 2059755654702755ULL, 626530377112246ULL,
+    188918989156857ULL, 3316657461542117ULL, 778033563170765ULL, 3568562306532187ULL,
+    2888619469733481ULL, 4364919962337ULL, 4095057288587059ULL, 2275461355379988ULL,
+    1507422995910897ULL, 3737691697116252ULL, 28779913258578ULL, 131453301647952ULL,
+    3613515597508469ULL, 2389606941441321ULL, 2135459302594806ULL, 105517262484263ULL,
+    2973432939331401ULL, 3447096622477885ULL, 684654106536844ULL, 2815198316729695ULL,
+    280303067216071ULL, 1841014812927024ULL, 1181026273060917ULL, 4092989148457730ULL,
+    1381045116206278ULL, 112475725893965ULL, 2309144740156686ULL, 1558825847609352ULL,
+    2008068002046292ULL, 3153511625856423ULL, 38469701427673ULL, 4240572315518056ULL,
+    2295170987320580ULL, 187734093837094ULL, 301041528077172ULL, 234553141005715ULL,
+    4170513699279606ULL, 1600132848196146ULL, 3149113064155689ULL, 2733255352600949ULL,
+    144915931419495ULL, 1221012073888926ULL, 4395668111081710ULL, 2464799161496070ULL,
+    3664256125241313ULL, 239705368981290ULL, 1415181408539490ULL, 2551836620449074ULL,
+    3003106895689578ULL, 968947218886924ULL, 270781532362673ULL, 2905980714350372ULL,
+    3246927349288975ULL, 2653377642686974ULL, 1577457093418263ULL, 279488238785848ULL,
+    568335962564552ULL, 4251365041645758ULL, 1257832559776007ULL, 2424022444243863ULL,
+    261166122046343ULL, 4399874608082116ULL, 640509987891568ULL, 3119706885332220ULL,
+    1990185416694007ULL, 119390098529341ULL, 220106534694050ULL, 937225880034895ULL,
+    656288151358882ULL, 1766967254772100ULL, 197900790969750ULL, 2992539221608875ULL,
+    3960297171111858ULL, 3499202002925081ULL, 1103060980924705ULL, 13670895919578ULL,
+    430132744187721ULL, 1206771838050953ULL, 2474749300167198ULL, 296299539510780ULL,
+    61565517686436ULL, 752778559080573ULL, 3049015829565410ULL, 3538647632527371ULL,
+    1640473028662032ULL, 182488721849306ULL, 1234378482161516ULL, 3736205988606381ULL,
+    2814216844344487ULL, 3877249891529557ULL, 51681412928433ULL, 4275336620301239ULL,
+    3084074032750651ULL, 42732308350456ULL, 3648603591552229ULL, 142450621701603ULL,
+    4020045475009854ULL, 1050293952073054ULL, 1974773673079851ULL, 1815515638724020ULL,
+    104845375825434ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_g_pow2_128_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1277614565900951U,
-    (uint64_t)378671684419493U, (uint64_t)3176260448102880U, (uint64_t)1575691435565077U,
-    (uint64_t)167304528382180U, (uint64_t)2600787765776588U, (uint64_t)7497946149293U,
-    (uint64_t)2184272641272202U, (uint64_t)2200235265236628U, (uint64_t)265969268774814U,
-    (uint64_t)1913228635640715U, (uint64_t)2831959046949342U, (uint64_t)888030405442963U,
-    (uint64_t)1817092932985033U, (uint64_t)101515844997121U, (uint64_t)3309468394859588U,
-    (uint64_t)3965334773689948U, (uint64_t)1945272965790738U, (uint64_t)4450939211427964U,
-    (uint64_t)211349698782702U, (uint64_t)2085160302160079U, (uint64_t)212812506072603U,
-    (uint64_t)3646122434511764U, (uint64_t)1711405092320514U, (uint64_t)95160920508464U,
-    (uint64_t)1677683368518073U, (uint64_t)4384656939250953U, (uint64_t)3548591046529893U,
-    (uint64_t)1683233536091384U, (uint64_t)105919586159941U, (uint64_t)1941416002726455U,
-    (uint64_t)246264372248216U, (uint64_t)3063044110922228U, (uint64_t)3772292170415825U,
-    (uint64_t)222933374989815U, (uint64_t)2417211163452935U, (uint64_t)2018230365573200U,
-    (uint64_t)1985974538911047U, (uint64_t)1387197705332739U, (uint64_t)186400825584956U,
-    (uint64_t)2469330487750329U, (uint64_t)1291983813301638U, (uint64_t)333416733706302U,
-    (uint64_t)3413315564261070U, (uint64_t)189444777569683U, (uint64_t)1062005622360420U,
-    (uint64_t)1800197715938740U, (uint64_t)3693110992551647U, (uint64_t)626990328941945U,
-    (uint64_t)40998857100520U, (uint64_t)3921983552805085U, (uint64_t)1016632437340656U,
-    (uint64_t)4016615929950878U, (uint64_t)2682554586771281U, (uint64_t)7043555162389U,
-    (uint64_t)3333819830676567U, (uint64_t)4120091964944036U, (uint64_t)1960788263484015U,
-    (uint64_t)1642145656273304U, (uint64_t)252814075789128U, (uint64_t)3085777342821357U,
-    (uint64_t)4166637997604052U, (uint64_t)1339401689756469U, (uint64_t)845938529607551U,
-    (uint64_t)223351828189283U, (uint64_t)1148648705186890U, (uint64_t)1230525014760605U,
-    (uint64_t)1869739475126720U, (uint64_t)4193966261205530U, (uint64_t)175684010336013U,
-    (uint64_t)4476719358931508U, (uint64_t)4209547487457638U, (uint64_t)2197536411673724U,
-    (uint64_t)3010838433412303U, (uint64_t)169318997251483U, (uint64_t)49493868302162U,
-    (uint64_t)3594601099078584U, (uint64_t)3662420905445942U, (uint64_t)3606544932233685U,
-    (uint64_t)270643652662165U, (uint64_t)180681786228544U, (uint64_t)2095882682308564U,
-    (uint64_t)813484483841391U, (uint64_t)1622665392824698U, (uint64_t)113821770225137U,
-    (uint64_t)3075432444115417U, (uint64_t)716502989978722U, (uint64_t)2304779892217245U,
-    (uint64_t)1760144151770127U, (uint64_t)235719156963938U, (uint64_t)3180013070471143U,
-    (uint64_t)1331027634540579U, (uint64_t)552273022992392U, (uint64_t)2858693077461887U,
-    (uint64_t)197914407731510U, (uint64_t)187252310910959U, (uint64_t)4160637171377125U,
-    (uint64_t)3225059526713298U, (uint64_t)2574558217383978U, (uint64_t)249695600622489U,
-    (uint64_t)364988742814327U, (uint64_t)4245298536326258U, (uint64_t)1812464706589342U,
-    (uint64_t)2734857123772998U, (uint64_t)120105577124628U, (uint64_t)160179251271109U,
-    (uint64_t)3604555733307834U, (uint64_t)150380003195715U, (uint64_t)1574304909935121U,
-    (uint64_t)142190285600761U, (uint64_t)1835385847725651U, (uint64_t)3168087139615901U,
-    (uint64_t)3201434861713736U, (uint64_t)741757984537760U, (uint64_t)163585009419543U,
-    (uint64_t)3837997981109783U, (uint64_t)3771946407870997U, (uint64_t)2867641360295452U,
-    (uint64_t)3097548691501578U, (uint64_t)124624912142104U, (uint64_t)2729896088769328U,
-    (uint64_t)1087786827035225U, (uint64_t)3934000813818614U, (uint64_t)1176792318645055U,
-    (uint64_t)125311882169270U, (uint64_t)3530709439299502U, (uint64_t)1561477829834527U,
-    (uint64_t)3927894570196761U, (uint64_t)3957765307669212U, (uint64_t)105720519513730U,
-    (uint64_t)3758969845816997U, (uint64_t)2738320452287300U, (uint64_t)2380753632109507U,
-    (uint64_t)2762090901149075U, (uint64_t)123455059136515U, (uint64_t)4222807813169807U,
-    (uint64_t)118064783651432U, (uint64_t)2877694712254934U, (uint64_t)3535027426396448U,
-    (uint64_t)100175663703417U, (uint64_t)3287921121213155U, (uint64_t)4497246481824206U,
-    (uint64_t)1960809949007025U, (uint64_t)3236854264159102U, (uint64_t)35028112623717U,
-    (uint64_t)338838627913273U, (uint64_t)2827531947914645U, (uint64_t)4231826783810670U,
-    (uint64_t)1082490106100389U, (uint64_t)13267544387448U, (uint64_t)4249975884259105U,
-    (uint64_t)2844862161652484U, (uint64_t)262742197948971U, (uint64_t)3525653802457116U,
-    (uint64_t)269963889261701U, (uint64_t)3690062482117102U, (uint64_t)675413453822147U,
-    (uint64_t)2170937868437574U, (uint64_t)2367632187022010U, (uint64_t)214032802409445U,
-    (uint64_t)2054007379612477U, (uint64_t)3558050826739009U, (uint64_t)266827184752634U,
-    (uint64_t)1946520293291195U, (uint64_t)238087872386556U, (uint64_t)490056555385700U,
-    (uint64_t)794405769357386U, (uint64_t)3886901294859702U, (uint64_t)3120414548626348U,
-    (uint64_t)84316625221136U, (uint64_t)223073962531835U, (uint64_t)4280846460577631U,
-    (uint64_t)344296282849308U, (uint64_t)3522116652699457U, (uint64_t)171817232053075U,
-    (uint64_t)3296636283062273U, (uint64_t)3587303364425579U, (uint64_t)1033485783633331U,
-    (uint64_t)3686984130812906U, (uint64_t)268290803650477U, (uint64_t)2803988215834467U,
-    (uint64_t)3821246410529720U, (uint64_t)1077722388925870U, (uint64_t)4187137036866164U,
-    (uint64_t)104696540795905U, (uint64_t)998770003854764U, (uint64_t)3960768137535019U,
-    (uint64_t)4293792474919135U, (uint64_t)3251297981727034U, (uint64_t)192479028790101U,
-    (uint64_t)1175880869349935U, (uint64_t)3506949259311937U, (uint64_t)2161711516160714U,
-    (uint64_t)2506820922270187U, (uint64_t)131002200661047U, (uint64_t)3532399477339994U,
-    (uint64_t)2515815721228719U, (uint64_t)4274974119021502U, (uint64_t)265752394510924U,
-    (uint64_t)163144272153395U, (uint64_t)2824260010502991U, (uint64_t)517077012665142U,
-    (uint64_t)602987073882924U, (uint64_t)2939630061751780U, (uint64_t)59211609557440U,
-    (uint64_t)963423614549333U, (uint64_t)495476232754434U, (uint64_t)94274496109103U,
-    (uint64_t)2245136222990187U, (uint64_t)185414764872288U, (uint64_t)2266067668609289U,
-    (uint64_t)3873978896235927U, (uint64_t)4428283513152105U, (uint64_t)3881481480259312U,
-    (uint64_t)207746202010862U, (uint64_t)1609437858011364U, (uint64_t)477585758421515U,
-    (uint64_t)3850430788664649U, (uint64_t)2682299074459173U, (uint64_t)149439089751274U,
-    (uint64_t)3665760243877698U, (uint64_t)1356661512658931U, (uint64_t)1675903262368322U,
-    (uint64_t)3355649228050892U, (uint64_t)99772108898412U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    1277614565900951ULL, 378671684419493ULL, 3176260448102880ULL, 1575691435565077ULL,
+    167304528382180ULL, 2600787765776588ULL, 7497946149293ULL, 2184272641272202ULL,
+    2200235265236628ULL, 265969268774814ULL, 1913228635640715ULL, 2831959046949342ULL,
+    888030405442963ULL, 1817092932985033ULL, 101515844997121ULL, 3309468394859588ULL,
+    3965334773689948ULL, 1945272965790738ULL, 4450939211427964ULL, 211349698782702ULL,
+    2085160302160079ULL, 212812506072603ULL, 3646122434511764ULL, 1711405092320514ULL,
+    95160920508464ULL, 1677683368518073ULL, 4384656939250953ULL, 3548591046529893ULL,
+    1683233536091384ULL, 105919586159941ULL, 1941416002726455ULL, 246264372248216ULL,
+    3063044110922228ULL, 3772292170415825ULL, 222933374989815ULL, 2417211163452935ULL,
+    2018230365573200ULL, 1985974538911047ULL, 1387197705332739ULL, 186400825584956ULL,
+    2469330487750329ULL, 1291983813301638ULL, 333416733706302ULL, 3413315564261070ULL,
+    189444777569683ULL, 1062005622360420ULL, 1800197715938740ULL, 3693110992551647ULL,
+    626990328941945ULL, 40998857100520ULL, 3921983552805085ULL, 1016632437340656ULL,
+    4016615929950878ULL, 2682554586771281ULL, 7043555162389ULL, 3333819830676567ULL,
+    4120091964944036ULL, 1960788263484015ULL, 1642145656273304ULL, 252814075789128ULL,
+    3085777342821357ULL, 4166637997604052ULL, 1339401689756469ULL, 845938529607551ULL,
+    223351828189283ULL, 1148648705186890ULL, 1230525014760605ULL, 1869739475126720ULL,
+    4193966261205530ULL, 175684010336013ULL, 4476719358931508ULL, 4209547487457638ULL,
+    2197536411673724ULL, 3010838433412303ULL, 169318997251483ULL, 49493868302162ULL,
+    3594601099078584ULL, 3662420905445942ULL, 3606544932233685ULL, 270643652662165ULL,
+    180681786228544ULL, 2095882682308564ULL, 813484483841391ULL, 1622665392824698ULL,
+    113821770225137ULL, 3075432444115417ULL, 716502989978722ULL, 2304779892217245ULL,
+    1760144151770127ULL, 235719156963938ULL, 3180013070471143ULL, 1331027634540579ULL,
+    552273022992392ULL, 2858693077461887ULL, 197914407731510ULL, 187252310910959ULL,
+    4160637171377125ULL, 3225059526713298ULL, 2574558217383978ULL, 249695600622489ULL,
+    364988742814327ULL, 4245298536326258ULL, 1812464706589342ULL, 2734857123772998ULL,
+    120105577124628ULL, 160179251271109ULL, 3604555733307834ULL, 150380003195715ULL,
+    1574304909935121ULL, 142190285600761ULL, 1835385847725651ULL, 3168087139615901ULL,
+    3201434861713736ULL, 741757984537760ULL, 163585009419543ULL, 3837997981109783ULL,
+    3771946407870997ULL, 2867641360295452ULL, 3097548691501578ULL, 124624912142104ULL,
+    2729896088769328ULL, 1087786827035225ULL, 3934000813818614ULL, 1176792318645055ULL,
+    125311882169270ULL, 3530709439299502ULL, 1561477829834527ULL, 3927894570196761ULL,
+    3957765307669212ULL, 105720519513730ULL, 3758969845816997ULL, 2738320452287300ULL,
+    2380753632109507ULL, 2762090901149075ULL, 123455059136515ULL, 4222807813169807ULL,
+    118064783651432ULL, 2877694712254934ULL, 3535027426396448ULL, 100175663703417ULL,
+    3287921121213155ULL, 4497246481824206ULL, 1960809949007025ULL, 3236854264159102ULL,
+    35028112623717ULL, 338838627913273ULL, 2827531947914645ULL, 4231826783810670ULL,
+    1082490106100389ULL, 13267544387448ULL, 4249975884259105ULL, 2844862161652484ULL,
+    262742197948971ULL, 3525653802457116ULL, 269963889261701ULL, 3690062482117102ULL,
+    675413453822147ULL, 2170937868437574ULL, 2367632187022010ULL, 214032802409445ULL,
+    2054007379612477ULL, 3558050826739009ULL, 266827184752634ULL, 1946520293291195ULL,
+    238087872386556ULL, 490056555385700ULL, 794405769357386ULL, 3886901294859702ULL,
+    3120414548626348ULL, 84316625221136ULL, 223073962531835ULL, 4280846460577631ULL,
+    344296282849308ULL, 3522116652699457ULL, 171817232053075ULL, 3296636283062273ULL,
+    3587303364425579ULL, 1033485783633331ULL, 3686984130812906ULL, 268290803650477ULL,
+    2803988215834467ULL, 3821246410529720ULL, 1077722388925870ULL, 4187137036866164ULL,
+    104696540795905ULL, 998770003854764ULL, 3960768137535019ULL, 4293792474919135ULL,
+    3251297981727034ULL, 192479028790101ULL, 1175880869349935ULL, 3506949259311937ULL,
+    2161711516160714ULL, 2506820922270187ULL, 131002200661047ULL, 3532399477339994ULL,
+    2515815721228719ULL, 4274974119021502ULL, 265752394510924ULL, 163144272153395ULL,
+    2824260010502991ULL, 517077012665142ULL, 602987073882924ULL, 2939630061751780ULL,
+    59211609557440ULL, 963423614549333ULL, 495476232754434ULL, 94274496109103ULL,
+    2245136222990187ULL, 185414764872288ULL, 2266067668609289ULL, 3873978896235927ULL,
+    4428283513152105ULL, 3881481480259312ULL, 207746202010862ULL, 1609437858011364ULL,
+    477585758421515ULL, 3850430788664649ULL, 2682299074459173ULL, 149439089751274ULL,
+    3665760243877698ULL, 1356661512658931ULL, 1675903262368322ULL, 3355649228050892ULL,
+    99772108898412ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_g_pow2_192_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)34056422761564U,
-    (uint64_t)3315864838337811U, (uint64_t)3797032336888745U, (uint64_t)2580641850480806U,
-    (uint64_t)208048944042500U, (uint64_t)1233795288689421U, (uint64_t)1048795233382631U,
-    (uint64_t)646545158071530U, (uint64_t)1816025742137285U, (uint64_t)12245672982162U,
-    (uint64_t)2119364213800870U, (uint64_t)2034960311715107U, (uint64_t)3172697815804487U,
-    (uint64_t)4185144850224160U, (uint64_t)2792055915674U, (uint64_t)795534452139321U,
-    (uint64_t)3647836177838185U, (uint64_t)2681403398797991U, (uint64_t)3149264270306207U,
-    (uint64_t)278704080615511U, (uint64_t)2752552368344718U, (uint64_t)1363840972378818U,
-    (uint64_t)1877521512083293U, (uint64_t)1862111388059470U, (uint64_t)36200324115014U,
-    (uint64_t)4183622899327217U, (uint64_t)747381675363076U, (uint64_t)2772916395314624U,
-    (uint64_t)833767013119965U, (uint64_t)246274452928088U, (uint64_t)1526238021297781U,
-    (uint64_t)3327534966022747U, (uint64_t)1169012581910517U, (uint64_t)4430894603030025U,
-    (uint64_t)149242742442115U, (uint64_t)1002569704307172U, (uint64_t)2763252093432365U,
-    (uint64_t)3037748497732938U, (uint64_t)2329811173939457U, (uint64_t)270769113180752U,
-    (uint64_t)4344092461623432U, (uint64_t)892200524589382U, (uint64_t)2511418516713970U,
-    (uint64_t)103575031265398U, (uint64_t)183736033430252U, (uint64_t)583003071257308U,
-    (uint64_t)3357167344738425U, (uint64_t)4038099763242651U, (uint64_t)1776250620957255U,
-    (uint64_t)51334115864192U, (uint64_t)2616405698969611U, (uint64_t)1196364755910565U,
-    (uint64_t)3135228056210500U, (uint64_t)533729417611761U, (uint64_t)86564351229326U,
-    (uint64_t)98936129527281U, (uint64_t)4425305036630677U, (uint64_t)2980296390253408U,
-    (uint64_t)2487091677325739U, (uint64_t)10501977234280U, (uint64_t)1805646499831077U,
-    (uint64_t)3120615962395477U, (uint64_t)3634629685307533U, (uint64_t)3009632755291436U,
-    (uint64_t)16794051906523U, (uint64_t)2465481597883214U, (uint64_t)211492787490403U,
-    (uint64_t)1120942867046103U, (uint64_t)486438308572108U, (uint64_t)76058986271771U,
-    (uint64_t)2435216584587357U, (uint64_t)3076359381968283U, (uint64_t)1071594491489655U,
-    (uint64_t)3148707450339154U, (uint64_t)249332205737851U, (uint64_t)4171051176626809U,
-    (uint64_t)3165176227956388U, (uint64_t)2400901591835233U, (uint64_t)1435783621333022U,
-    (uint64_t)20312753440321U, (uint64_t)1767293887448005U, (uint64_t)685150647587522U,
-    (uint64_t)2957187934449906U, (uint64_t)382661319140439U, (uint64_t)177583591139601U,
-    (uint64_t)2083572648630743U, (uint64_t)1083410277889419U, (uint64_t)4267902097868310U,
-    (uint64_t)679989918385081U, (uint64_t)123155311554032U, (uint64_t)2830267662472020U,
-    (uint64_t)4476040509735924U, (uint64_t)526697201585144U, (uint64_t)3465306430573135U,
-    (uint64_t)2296616218591U, (uint64_t)1270626872734279U, (uint64_t)1049740198790549U,
-    (uint64_t)4197567214843444U, (uint64_t)1962225231320591U, (uint64_t)186125026796856U,
-    (uint64_t)737027567341142U, (uint64_t)4364616098174U, (uint64_t)3618884818756660U,
-    (uint64_t)1236837563717668U, (uint64_t)162873772439548U, (uint64_t)3081542470065122U,
-    (uint64_t)910331750163991U, (uint64_t)2110498143869827U, (uint64_t)3208473121852657U,
-    (uint64_t)94687786224509U, (uint64_t)4113309027567819U, (uint64_t)4272179438357536U,
-    (uint64_t)1857418654076140U, (uint64_t)1672678841741004U, (uint64_t)94482160248411U,
-    (uint64_t)1928652436799020U, (uint64_t)1750866462381515U, (uint64_t)4048060485672270U,
-    (uint64_t)4006680581258587U, (uint64_t)14850434761312U, (uint64_t)2828734997081648U,
-    (uint64_t)1975589525873972U, (uint64_t)3724347738416009U, (uint64_t)597163266689736U,
-    (uint64_t)14568362978551U, (uint64_t)2203865455839744U, (uint64_t)2237034958890595U,
-    (uint64_t)1863572986731818U, (uint64_t)2329774560279041U, (uint64_t)245105447642201U,
-    (uint64_t)2179697447864822U, (uint64_t)1769609498189882U, (uint64_t)1916950746430931U,
-    (uint64_t)847019613787312U, (uint64_t)163210606565100U, (uint64_t)3658248417400062U,
-    (uint64_t)717138296045881U, (uint64_t)42531212306121U, (uint64_t)1040915917097532U,
-    (uint64_t)77364489101310U, (uint64_t)539253504015590U, (uint64_t)732690726289841U,
-    (uint64_t)3401622034697806U, (uint64_t)2864593278358513U, (uint64_t)142611941887017U,
-    (uint64_t)536364617506702U, (uint64_t)845071859974284U, (uint64_t)4461787417089721U,
-    (uint64_t)2633811871939723U, (uint64_t)113619731985610U, (uint64_t)2535870015489566U,
-    (uint64_t)2146224665077830U, (uint64_t)2593725534662047U, (uint64_t)1332349537449710U,
-    (uint64_t)153375287068096U, (uint64_t)3689977177165276U, (uint64_t)3631865615314120U,
-    (uint64_t)184644878348929U, (uint64_t)2220481726602813U, (uint64_t)204002551273091U,
-    (uint64_t)3022560051766785U, (uint64_t)3125940458001213U, (uint64_t)4258299086906325U,
-    (uint64_t)1072471915162030U, (uint64_t)2797562724530U, (uint64_t)3974298156223059U,
-    (uint64_t)1624778551002554U, (uint64_t)3490703864485971U, (uint64_t)2533877484212458U,
-    (uint64_t)176107782538555U, (uint64_t)4275987398312137U, (uint64_t)4397120757693722U,
-    (uint64_t)3001292763847390U, (uint64_t)1556490837621310U, (uint64_t)70442953037671U,
-    (uint64_t)1558915972545974U, (uint64_t)744724505252845U, (uint64_t)2697230204313363U,
-    (uint64_t)3495671924212144U, (uint64_t)95744296878924U, (uint64_t)1508848630912047U,
-    (uint64_t)4163599342850968U, (uint64_t)1234988733935901U, (uint64_t)3789722472212706U,
-    (uint64_t)219522007052022U, (uint64_t)2106597506701262U, (uint64_t)3231115099832239U,
-    (uint64_t)1296436890593905U, (uint64_t)1016795619587656U, (uint64_t)231150565033388U,
-    (uint64_t)4205501688458754U, (uint64_t)2271569140386062U, (uint64_t)3421769599058157U,
-    (uint64_t)4118408853784554U, (uint64_t)276709341465173U, (uint64_t)2681340614854362U,
-    (uint64_t)2514413365628788U, (uint64_t)62294545067341U, (uint64_t)277610220069365U,
-    (uint64_t)252463150123799U, (uint64_t)2547353593759399U, (uint64_t)1857438147448607U,
-    (uint64_t)2964811969681256U, (uint64_t)3303706463835387U, (uint64_t)248936570980853U,
-    (uint64_t)3208982702478009U, (uint64_t)2518671051730787U, (uint64_t)727433853033835U,
-    (uint64_t)1290389308223446U, (uint64_t)220742793981035U, (uint64_t)3851225361654709U,
-    (uint64_t)2307489307934273U, (uint64_t)1151710489948266U, (uint64_t)289775285210516U,
-    (uint64_t)222685002397295U, (uint64_t)1222117478082108U, (uint64_t)2822029169395728U,
-    (uint64_t)1172146252219882U, (uint64_t)2626108105510259U, (uint64_t)209803527887167U,
-    (uint64_t)2718831919953281U, (uint64_t)4348638387588593U, (uint64_t)3761438313263183U,
-    (uint64_t)13169515318095U, (uint64_t)212893621229476U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    34056422761564ULL, 3315864838337811ULL, 3797032336888745ULL, 2580641850480806ULL,
+    208048944042500ULL, 1233795288689421ULL, 1048795233382631ULL, 646545158071530ULL,
+    1816025742137285ULL, 12245672982162ULL, 2119364213800870ULL, 2034960311715107ULL,
+    3172697815804487ULL, 4185144850224160ULL, 2792055915674ULL, 795534452139321ULL,
+    3647836177838185ULL, 2681403398797991ULL, 3149264270306207ULL, 278704080615511ULL,
+    2752552368344718ULL, 1363840972378818ULL, 1877521512083293ULL, 1862111388059470ULL,
+    36200324115014ULL, 4183622899327217ULL, 747381675363076ULL, 2772916395314624ULL,
+    833767013119965ULL, 246274452928088ULL, 1526238021297781ULL, 3327534966022747ULL,
+    1169012581910517ULL, 4430894603030025ULL, 149242742442115ULL, 1002569704307172ULL,
+    2763252093432365ULL, 3037748497732938ULL, 2329811173939457ULL, 270769113180752ULL,
+    4344092461623432ULL, 892200524589382ULL, 2511418516713970ULL, 103575031265398ULL,
+    183736033430252ULL, 583003071257308ULL, 3357167344738425ULL, 4038099763242651ULL,
+    1776250620957255ULL, 51334115864192ULL, 2616405698969611ULL, 1196364755910565ULL,
+    3135228056210500ULL, 533729417611761ULL, 86564351229326ULL, 98936129527281ULL,
+    4425305036630677ULL, 2980296390253408ULL, 2487091677325739ULL, 10501977234280ULL,
+    1805646499831077ULL, 3120615962395477ULL, 3634629685307533ULL, 3009632755291436ULL,
+    16794051906523ULL, 2465481597883214ULL, 211492787490403ULL, 1120942867046103ULL,
+    486438308572108ULL, 76058986271771ULL, 2435216584587357ULL, 3076359381968283ULL,
+    1071594491489655ULL, 3148707450339154ULL, 249332205737851ULL, 4171051176626809ULL,
+    3165176227956388ULL, 2400901591835233ULL, 1435783621333022ULL, 20312753440321ULL,
+    1767293887448005ULL, 685150647587522ULL, 2957187934449906ULL, 382661319140439ULL,
+    177583591139601ULL, 2083572648630743ULL, 1083410277889419ULL, 4267902097868310ULL,
+    679989918385081ULL, 123155311554032ULL, 2830267662472020ULL, 4476040509735924ULL,
+    526697201585144ULL, 3465306430573135ULL, 2296616218591ULL, 1270626872734279ULL,
+    1049740198790549ULL, 4197567214843444ULL, 1962225231320591ULL, 186125026796856ULL,
+    737027567341142ULL, 4364616098174ULL, 3618884818756660ULL, 1236837563717668ULL,
+    162873772439548ULL, 3081542470065122ULL, 910331750163991ULL, 2110498143869827ULL,
+    3208473121852657ULL, 94687786224509ULL, 4113309027567819ULL, 4272179438357536ULL,
+    1857418654076140ULL, 1672678841741004ULL, 94482160248411ULL, 1928652436799020ULL,
+    1750866462381515ULL, 4048060485672270ULL, 4006680581258587ULL, 14850434761312ULL,
+    2828734997081648ULL, 1975589525873972ULL, 3724347738416009ULL, 597163266689736ULL,
+    14568362978551ULL, 2203865455839744ULL, 2237034958890595ULL, 1863572986731818ULL,
+    2329774560279041ULL, 245105447642201ULL, 2179697447864822ULL, 1769609498189882ULL,
+    1916950746430931ULL, 847019613787312ULL, 163210606565100ULL, 3658248417400062ULL,
+    717138296045881ULL, 42531212306121ULL, 1040915917097532ULL, 77364489101310ULL,
+    539253504015590ULL, 732690726289841ULL, 3401622034697806ULL, 2864593278358513ULL,
+    142611941887017ULL, 536364617506702ULL, 845071859974284ULL, 4461787417089721ULL,
+    2633811871939723ULL, 113619731985610ULL, 2535870015489566ULL, 2146224665077830ULL,
+    2593725534662047ULL, 1332349537449710ULL, 153375287068096ULL, 3689977177165276ULL,
+    3631865615314120ULL, 184644878348929ULL, 2220481726602813ULL, 204002551273091ULL,
+    3022560051766785ULL, 3125940458001213ULL, 4258299086906325ULL, 1072471915162030ULL,
+    2797562724530ULL, 3974298156223059ULL, 1624778551002554ULL, 3490703864485971ULL,
+    2533877484212458ULL, 176107782538555ULL, 4275987398312137ULL, 4397120757693722ULL,
+    3001292763847390ULL, 1556490837621310ULL, 70442953037671ULL, 1558915972545974ULL,
+    744724505252845ULL, 2697230204313363ULL, 3495671924212144ULL, 95744296878924ULL,
+    1508848630912047ULL, 4163599342850968ULL, 1234988733935901ULL, 3789722472212706ULL,
+    219522007052022ULL, 2106597506701262ULL, 3231115099832239ULL, 1296436890593905ULL,
+    1016795619587656ULL, 231150565033388ULL, 4205501688458754ULL, 2271569140386062ULL,
+    3421769599058157ULL, 4118408853784554ULL, 276709341465173ULL, 2681340614854362ULL,
+    2514413365628788ULL, 62294545067341ULL, 277610220069365ULL, 252463150123799ULL,
+    2547353593759399ULL, 1857438147448607ULL, 2964811969681256ULL, 3303706463835387ULL,
+    248936570980853ULL, 3208982702478009ULL, 2518671051730787ULL, 727433853033835ULL,
+    1290389308223446ULL, 220742793981035ULL, 3851225361654709ULL, 2307489307934273ULL,
+    1151710489948266ULL, 289775285210516ULL, 222685002397295ULL, 1222117478082108ULL,
+    2822029169395728ULL, 1172146252219882ULL, 2626108105510259ULL, 209803527887167ULL,
+    2718831919953281ULL, 4348638387588593ULL, 3761438313263183ULL, 13169515318095ULL,
+    212893621229476ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_basepoint_table_w5[480U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)705178180786072U,
-    (uint64_t)3855836460717471U, (uint64_t)4089131105950716U, (uint64_t)3301581525494108U,
-    (uint64_t)133858670344668U, (uint64_t)2199641648059576U, (uint64_t)1278080618437060U,
-    (uint64_t)3959378566518708U, (uint64_t)3455034269351872U, (uint64_t)79417610544803U,
-    (uint64_t)1U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)1282049064345544U, (uint64_t)971732600440099U, (uint64_t)1014594595727339U,
-    (uint64_t)4392159187541980U, (uint64_t)268327875692285U, (uint64_t)2411661712280539U,
-    (uint64_t)1092576199280126U, (uint64_t)4328619610718051U, (uint64_t)3535440816471627U,
-    (uint64_t)95182251488556U, (uint64_t)1893725512243753U, (uint64_t)3619861457111820U,
-    (uint64_t)879374960417905U, (uint64_t)2868056058129113U, (uint64_t)273195291893682U,
-    (uint64_t)2044797305960112U, (uint64_t)2357106853933780U, (uint64_t)3563112438336058U,
-    (uint64_t)2430811541762558U, (uint64_t)106443809495428U, (uint64_t)2231357633909668U,
-    (uint64_t)3641705835951936U, (uint64_t)80642569314189U, (uint64_t)2254841882373268U,
-    (uint64_t)149848031966573U, (uint64_t)2304615661367764U, (uint64_t)2410957403736446U,
-    (uint64_t)2712754805859804U, (uint64_t)2440183877540536U, (uint64_t)99784623895865U,
-    (uint64_t)3667773127482758U, (uint64_t)1354899394473308U, (uint64_t)3636602998800808U,
-    (uint64_t)2709296679846364U, (uint64_t)7253362091963U, (uint64_t)3585950735562744U,
-    (uint64_t)935775991758415U, (uint64_t)4108078106735201U, (uint64_t)556081800336307U,
-    (uint64_t)229585977163057U, (uint64_t)4055594186679801U, (uint64_t)1767681004944933U,
-    (uint64_t)1432634922083242U, (uint64_t)534935602949197U, (uint64_t)251753159522567U,
-    (uint64_t)2846474078499321U, (uint64_t)4488649590348702U, (uint64_t)2437476916025038U,
-    (uint64_t)3040577412822874U, (uint64_t)79405234918614U, (uint64_t)3030621226551508U,
-    (uint64_t)2801117003929806U, (uint64_t)1642927515498422U, (uint64_t)2802725079726297U,
-    (uint64_t)8472780626107U, (uint64_t)866068070352655U, (uint64_t)188080768545106U,
-    (uint64_t)2152119998903058U, (uint64_t)3391239985029665U, (uint64_t)23820026013564U,
-    (uint64_t)2965064154891949U, (uint64_t)1846516097921398U, (uint64_t)4418379948133146U,
-    (uint64_t)3137755426942400U, (uint64_t)47705291301781U, (uint64_t)4278533051105665U,
-    (uint64_t)3453643211214931U, (uint64_t)3379734319145156U, (uint64_t)3762442192097039U,
-    (uint64_t)40243003528694U, (uint64_t)4063448994211201U, (uint64_t)5697015368785U,
-    (uint64_t)1006545411838613U, (uint64_t)4242291693755210U, (uint64_t)135184629190512U,
-    (uint64_t)264898689131035U, (uint64_t)611796474823597U, (uint64_t)3255382250029089U,
-    (uint64_t)3490429246984696U, (uint64_t)236558595864362U, (uint64_t)2055934691551704U,
-    (uint64_t)1487711670114502U, (uint64_t)1823930698221632U, (uint64_t)2130937287438472U,
-    (uint64_t)154610053389779U, (uint64_t)2746573287023216U, (uint64_t)2430987262221221U,
-    (uint64_t)1668741642878689U, (uint64_t)904982541243977U, (uint64_t)56087343124948U,
-    (uint64_t)393905062353536U, (uint64_t)412681877350188U, (uint64_t)3153602040979977U,
-    (uint64_t)4466820876224989U, (uint64_t)146579165617857U, (uint64_t)2628741216508991U,
-    (uint64_t)747994231529806U, (uint64_t)750506569317681U, (uint64_t)1887492790748779U,
-    (uint64_t)35259008682771U, (uint64_t)2085116434894208U, (uint64_t)543291398921711U,
-    (uint64_t)1144362007901552U, (uint64_t)679305136036846U, (uint64_t)141090902244489U,
-    (uint64_t)632480954474859U, (uint64_t)2384513102652591U, (uint64_t)2225529790159790U,
-    (uint64_t)692258664851625U, (uint64_t)198681843567699U, (uint64_t)2397092587228181U,
-    (uint64_t)145862822166614U, (uint64_t)196976540479452U, (uint64_t)3321831130141455U,
-    (uint64_t)69266673089832U, (uint64_t)4469644227342284U, (uint64_t)3899271145504796U,
-    (uint64_t)1261890974076660U, (uint64_t)525357673886694U, (uint64_t)182135997828583U,
-    (uint64_t)4292760618810332U, (uint64_t)3404186545541683U, (uint64_t)312297386688768U,
-    (uint64_t)204377466824608U, (uint64_t)230900767857952U, (uint64_t)3871485172339693U,
-    (uint64_t)779449329662955U, (uint64_t)978655822464694U, (uint64_t)2278252139594027U,
-    (uint64_t)104641527040382U, (uint64_t)3528840153625765U, (uint64_t)4484699080275273U,
-    (uint64_t)1463971951102316U, (uint64_t)4013910812844749U, (uint64_t)228915589433620U,
-    (uint64_t)1209641433482461U, (uint64_t)4043178788774759U, (uint64_t)3008668238856634U,
-    (uint64_t)1448425089071412U, (uint64_t)26269719725037U, (uint64_t)3330785027545223U,
-    (uint64_t)852657975349259U, (uint64_t)227245054466105U, (uint64_t)1534632353984777U,
-    (uint64_t)207715098574660U, (uint64_t)3209837527352280U, (uint64_t)4051688046309066U,
-    (uint64_t)3839009590725955U, (uint64_t)1321506437398842U, (uint64_t)68340219159928U,
-    (uint64_t)1806950276956275U, (uint64_t)3923908055275295U, (uint64_t)743963253393575U,
-    (uint64_t)42162407478783U, (uint64_t)261334584474610U, (uint64_t)3728224928885214U,
-    (uint64_t)4004701081842869U, (uint64_t)709043201644674U, (uint64_t)4267294249150171U,
-    (uint64_t)255540582975025U, (uint64_t)875490593722211U, (uint64_t)796393708218375U,
-    (uint64_t)14774425627956U, (uint64_t)1500040516752097U, (uint64_t)141076627721678U,
-    (uint64_t)2634539368480628U, (uint64_t)1106488853550103U, (uint64_t)2346231921151930U,
-    (uint64_t)897108283954283U, (uint64_t)64616679559843U, (uint64_t)400244949840943U,
-    (uint64_t)1731263826831733U, (uint64_t)1649996579904651U, (uint64_t)3643693449640761U,
-    (uint64_t)172543068638991U, (uint64_t)329537981097182U, (uint64_t)2029799860802869U,
-    (uint64_t)4377737515208862U, (uint64_t)29103311051334U, (uint64_t)265583594111499U,
-    (uint64_t)3798074876561255U, (uint64_t)184749333259352U, (uint64_t)3117395073661801U,
-    (uint64_t)3695784565008833U, (uint64_t)64282709896721U, (uint64_t)1618968913246422U,
-    (uint64_t)3185235128095257U, (uint64_t)3288745068118692U, (uint64_t)1963818603508782U,
-    (uint64_t)281054350739495U, (uint64_t)1658639050810346U, (uint64_t)3061097601679552U,
-    (uint64_t)3023781433263746U, (uint64_t)2770283391242475U, (uint64_t)144508864751908U,
-    (uint64_t)173576288079856U, (uint64_t)46114579547054U, (uint64_t)1679480127300211U,
-    (uint64_t)1683062051644007U, (uint64_t)117183826129323U, (uint64_t)1894068608117440U,
-    (uint64_t)3846899838975733U, (uint64_t)4289279019496192U, (uint64_t)176995887914031U,
-    (uint64_t)78074942938713U, (uint64_t)454207263265292U, (uint64_t)972683614054061U,
-    (uint64_t)808474205144361U, (uint64_t)942703935951735U, (uint64_t)134460241077887U,
-    (uint64_t)2104196179349630U, (uint64_t)501632371208418U, (uint64_t)1666838991431177U,
-    (uint64_t)445606193139838U, (uint64_t)73704603396096U, (uint64_t)3140284774064777U,
-    (uint64_t)1356066420820179U, (uint64_t)227054159419281U, (uint64_t)1847611229198687U,
-    (uint64_t)82327838827660U, (uint64_t)3704027573265803U, (uint64_t)1585260489220244U,
-    (uint64_t)4404647914931933U, (uint64_t)2424649827425515U, (uint64_t)206821944206116U,
-    (uint64_t)1508635776287972U, (uint64_t)1933584575629676U, (uint64_t)1903635423783032U,
-    (uint64_t)4193642165165650U, (uint64_t)234321074690644U, (uint64_t)210406774251925U,
-    (uint64_t)1965845668185599U, (uint64_t)3059839433804731U, (uint64_t)1933300510683631U,
-    (uint64_t)150696600689211U, (uint64_t)4069293682158567U, (uint64_t)4346344602660044U,
-    (uint64_t)312200249664561U, (uint64_t)2495020807621840U, (uint64_t)1912707714385U,
-    (uint64_t)299345978159762U, (uint64_t)1164752722686920U, (uint64_t)225322433710338U,
-    (uint64_t)3128747381283759U, (uint64_t)275659067815583U, (uint64_t)1489671057429039U,
-    (uint64_t)1567693343342676U, (uint64_t)921672046098071U, (uint64_t)3707418899384085U,
-    (uint64_t)54646424931593U, (uint64_t)4026733380127147U, (uint64_t)2933435393699231U,
-    (uint64_t)3356593659521967U, (uint64_t)3637750749325529U, (uint64_t)232939412379045U,
-    (uint64_t)2298399636043069U, (uint64_t)270361546063041U, (uint64_t)2523933572551420U,
-    (uint64_t)3456896091572950U, (uint64_t)185447004732850U, (uint64_t)429322937697821U,
-    (uint64_t)2579704215668222U, (uint64_t)695065378803349U, (uint64_t)3987916247731243U,
-    (uint64_t)255159546348233U, (uint64_t)3057777929921282U, (uint64_t)1608970699916312U,
-    (uint64_t)1902369623063807U, (uint64_t)1413619643652777U, (uint64_t)94983996321227U,
-    (uint64_t)2832873179548050U, (uint64_t)4335430233622555U, (uint64_t)1559023976028843U,
-    (uint64_t)3297181988648895U, (uint64_t)100072021232323U, (uint64_t)2124984034109675U,
-    (uint64_t)4501252835618918U, (uint64_t)2053336899483297U, (uint64_t)638807226463876U,
-    (uint64_t)278445213600634U, (uint64_t)2311236445660555U, (uint64_t)303317664040012U,
-    (uint64_t)2659353858089024U, (uint64_t)3598827423980130U, (uint64_t)176059343827873U,
-    (uint64_t)3891639526275437U, (uint64_t)252823982819463U, (uint64_t)3404823300622345U,
-    (uint64_t)2758370772497456U, (uint64_t)91397496598783U, (uint64_t)2248661144141892U,
-    (uint64_t)491087075271969U, (uint64_t)1786344894571315U, (uint64_t)452497694885923U,
-    (uint64_t)34039628873357U, (uint64_t)2116503165025197U, (uint64_t)4436733709429923U,
-    (uint64_t)3045800776819238U, (uint64_t)1385518906078375U, (uint64_t)110495603336764U,
-    (uint64_t)4051447296249587U, (uint64_t)1103557421498625U, (uint64_t)1840785058439622U,
-    (uint64_t)425322753992314U, (uint64_t)98330046771676U, (uint64_t)365407468686431U,
-    (uint64_t)2611246859977123U, (uint64_t)3050253933135339U, (uint64_t)1006482220896688U,
-    (uint64_t)166818196428389U, (uint64_t)3415236093104372U, (uint64_t)1762308883882288U,
-    (uint64_t)1327828123094558U, (uint64_t)3403946425556706U, (uint64_t)96503464455441U,
-    (uint64_t)3893015304031471U, (uint64_t)3740839477490397U, (uint64_t)2411470812852231U,
-    (uint64_t)940927462436211U, (uint64_t)163825285911099U, (uint64_t)1622441495640386U,
-    (uint64_t)850224095680266U, (uint64_t)76199085900939U, (uint64_t)1941852365144042U,
-    (uint64_t)140326673652807U, (uint64_t)3161611011249524U, (uint64_t)317297150009965U,
-    (uint64_t)2145053259340619U, (uint64_t)2180498176457552U, (uint64_t)38457740506224U,
-    (uint64_t)394174899129468U, (uint64_t)2687474560485245U, (uint64_t)1542175980184516U,
-    (uint64_t)1628502671124819U, (uint64_t)48477401124385U, (uint64_t)4474181600025082U,
-    (uint64_t)2142747956365708U, (uint64_t)1638299432475478U, (uint64_t)2005869320353249U,
-    (uint64_t)112292630760956U, (uint64_t)1887521965171588U, (uint64_t)457587531429696U,
-    (uint64_t)840994209504042U, (uint64_t)4268060856325798U, (uint64_t)195597993440388U,
-    (uint64_t)4148484749020338U, (uint64_t)2074885000909672U, (uint64_t)2309839019263165U,
-    (uint64_t)2087616209681024U, (uint64_t)257214370719966U, (uint64_t)2331363508376581U,
-    (uint64_t)1233124357504711U, (uint64_t)2849542202650296U, (uint64_t)3790982825325736U,
-    (uint64_t)13381453503890U, (uint64_t)1665246594531069U, (uint64_t)4165624287443904U,
-    (uint64_t)3418759698027493U, (uint64_t)2118493255117399U, (uint64_t)136249206366067U,
-    (uint64_t)4064050233283309U, (uint64_t)1368779887911300U, (uint64_t)4370550759530269U,
-    (uint64_t)66992990631341U, (uint64_t)84442368922270U, (uint64_t)2139322635321394U,
-    (uint64_t)2076163483726795U, (uint64_t)657097866349103U, (uint64_t)2095579409488071U,
-    (uint64_t)226525774791341U, (uint64_t)4445744257665359U, (uint64_t)2035752839278107U,
-    (uint64_t)1998242662838304U, (uint64_t)1601548415521694U, (uint64_t)151297684296198U,
-    (uint64_t)1350963039017303U, (uint64_t)2624916349548281U, (uint64_t)2018863259670197U,
-    (uint64_t)2717274357461290U, (uint64_t)94024796961533U, (uint64_t)711335520409111U,
-    (uint64_t)4322093765820263U, (uint64_t)2041650358174649U, (uint64_t)3439791603157577U,
-    (uint64_t)179292018616267U, (uint64_t)2436436921286669U, (uint64_t)3905268797208340U,
-    (uint64_t)2829194895162985U, (uint64_t)1355175382191543U, (uint64_t)55128779761539U,
-    (uint64_t)2648428998786922U, (uint64_t)869805912573515U, (uint64_t)3706708942847864U,
-    (uint64_t)2785288916584667U, (uint64_t)37156862850147U, (uint64_t)1422245336293228U,
-    (uint64_t)4497066058933021U, (uint64_t)85588912978349U, (uint64_t)2616252221194611U,
-    (uint64_t)53506393720989U, (uint64_t)3727539190732644U, (uint64_t)872132446545237U,
-    (uint64_t)933583590986077U, (uint64_t)3794591170581203U, (uint64_t)167875550514069U,
-    (uint64_t)2267466834993297U, (uint64_t)3072652681756816U, (uint64_t)2108499037430803U,
-    (uint64_t)1606735192928366U, (uint64_t)72339568815255U, (uint64_t)3258484260684219U,
-    (uint64_t)3277927277719855U, (uint64_t)2459560373011535U, (uint64_t)1672794293294033U,
-    (uint64_t)227460934880669U, (uint64_t)3702454405413705U, (uint64_t)106168148441676U,
-    (uint64_t)1356617643071159U, (uint64_t)3280896569942762U, (uint64_t)142618711614302U,
-    (uint64_t)4291782740862057U, (uint64_t)4141020884874235U, (uint64_t)3720787221267125U,
-    (uint64_t)552884940089351U, (uint64_t)174626154407180U, (uint64_t)972071013326540U,
-    (uint64_t)4458530419931903U, (uint64_t)4435168973822858U, (uint64_t)1902967548748411U,
-    (uint64_t)53007977605840U, (uint64_t)2453997334323925U, (uint64_t)3653077937283262U,
-    (uint64_t)850660265046356U, (uint64_t)312721924805450U, (uint64_t)268503679240683U,
-    (uint64_t)256960167714122U, (uint64_t)1474492507858350U, (uint64_t)2456345526438488U,
-    (uint64_t)3686029507160255U, (uint64_t)279158933010398U, (uint64_t)3646946293948063U,
-    (uint64_t)704477527214036U, (uint64_t)3387744169891031U, (uint64_t)3772622670980241U,
-    (uint64_t)136368897543304U, (uint64_t)3744894052577607U, (uint64_t)1976007214443430U,
-    (uint64_t)2090045379763451U, (uint64_t)968565474458988U, (uint64_t)234295114806066U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    705178180786072ULL, 3855836460717471ULL, 4089131105950716ULL, 3301581525494108ULL,
+    133858670344668ULL, 2199641648059576ULL, 1278080618437060ULL, 3959378566518708ULL,
+    3455034269351872ULL, 79417610544803ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1282049064345544ULL,
+    971732600440099ULL, 1014594595727339ULL, 4392159187541980ULL, 268327875692285ULL,
+    2411661712280539ULL, 1092576199280126ULL, 4328619610718051ULL, 3535440816471627ULL,
+    95182251488556ULL, 1893725512243753ULL, 3619861457111820ULL, 879374960417905ULL,
+    2868056058129113ULL, 273195291893682ULL, 2044797305960112ULL, 2357106853933780ULL,
+    3563112438336058ULL, 2430811541762558ULL, 106443809495428ULL, 2231357633909668ULL,
+    3641705835951936ULL, 80642569314189ULL, 2254841882373268ULL, 149848031966573ULL,
+    2304615661367764ULL, 2410957403736446ULL, 2712754805859804ULL, 2440183877540536ULL,
+    99784623895865ULL, 3667773127482758ULL, 1354899394473308ULL, 3636602998800808ULL,
+    2709296679846364ULL, 7253362091963ULL, 3585950735562744ULL, 935775991758415ULL,
+    4108078106735201ULL, 556081800336307ULL, 229585977163057ULL, 4055594186679801ULL,
+    1767681004944933ULL, 1432634922083242ULL, 534935602949197ULL, 251753159522567ULL,
+    2846474078499321ULL, 4488649590348702ULL, 2437476916025038ULL, 3040577412822874ULL,
+    79405234918614ULL, 3030621226551508ULL, 2801117003929806ULL, 1642927515498422ULL,
+    2802725079726297ULL, 8472780626107ULL, 866068070352655ULL, 188080768545106ULL,
+    2152119998903058ULL, 3391239985029665ULL, 23820026013564ULL, 2965064154891949ULL,
+    1846516097921398ULL, 4418379948133146ULL, 3137755426942400ULL, 47705291301781ULL,
+    4278533051105665ULL, 3453643211214931ULL, 3379734319145156ULL, 3762442192097039ULL,
+    40243003528694ULL, 4063448994211201ULL, 5697015368785ULL, 1006545411838613ULL,
+    4242291693755210ULL, 135184629190512ULL, 264898689131035ULL, 611796474823597ULL,
+    3255382250029089ULL, 3490429246984696ULL, 236558595864362ULL, 2055934691551704ULL,
+    1487711670114502ULL, 1823930698221632ULL, 2130937287438472ULL, 154610053389779ULL,
+    2746573287023216ULL, 2430987262221221ULL, 1668741642878689ULL, 904982541243977ULL,
+    56087343124948ULL, 393905062353536ULL, 412681877350188ULL, 3153602040979977ULL,
+    4466820876224989ULL, 146579165617857ULL, 2628741216508991ULL, 747994231529806ULL,
+    750506569317681ULL, 1887492790748779ULL, 35259008682771ULL, 2085116434894208ULL,
+    543291398921711ULL, 1144362007901552ULL, 679305136036846ULL, 141090902244489ULL,
+    632480954474859ULL, 2384513102652591ULL, 2225529790159790ULL, 692258664851625ULL,
+    198681843567699ULL, 2397092587228181ULL, 145862822166614ULL, 196976540479452ULL,
+    3321831130141455ULL, 69266673089832ULL, 4469644227342284ULL, 3899271145504796ULL,
+    1261890974076660ULL, 525357673886694ULL, 182135997828583ULL, 4292760618810332ULL,
+    3404186545541683ULL, 312297386688768ULL, 204377466824608ULL, 230900767857952ULL,
+    3871485172339693ULL, 779449329662955ULL, 978655822464694ULL, 2278252139594027ULL,
+    104641527040382ULL, 3528840153625765ULL, 4484699080275273ULL, 1463971951102316ULL,
+    4013910812844749ULL, 228915589433620ULL, 1209641433482461ULL, 4043178788774759ULL,
+    3008668238856634ULL, 1448425089071412ULL, 26269719725037ULL, 3330785027545223ULL,
+    852657975349259ULL, 227245054466105ULL, 1534632353984777ULL, 207715098574660ULL,
+    3209837527352280ULL, 4051688046309066ULL, 3839009590725955ULL, 1321506437398842ULL,
+    68340219159928ULL, 1806950276956275ULL, 3923908055275295ULL, 743963253393575ULL,
+    42162407478783ULL, 261334584474610ULL, 3728224928885214ULL, 4004701081842869ULL,
+    709043201644674ULL, 4267294249150171ULL, 255540582975025ULL, 875490593722211ULL,
+    796393708218375ULL, 14774425627956ULL, 1500040516752097ULL, 141076627721678ULL,
+    2634539368480628ULL, 1106488853550103ULL, 2346231921151930ULL, 897108283954283ULL,
+    64616679559843ULL, 400244949840943ULL, 1731263826831733ULL, 1649996579904651ULL,
+    3643693449640761ULL, 172543068638991ULL, 329537981097182ULL, 2029799860802869ULL,
+    4377737515208862ULL, 29103311051334ULL, 265583594111499ULL, 3798074876561255ULL,
+    184749333259352ULL, 3117395073661801ULL, 3695784565008833ULL, 64282709896721ULL,
+    1618968913246422ULL, 3185235128095257ULL, 3288745068118692ULL, 1963818603508782ULL,
+    281054350739495ULL, 1658639050810346ULL, 3061097601679552ULL, 3023781433263746ULL,
+    2770283391242475ULL, 144508864751908ULL, 173576288079856ULL, 46114579547054ULL,
+    1679480127300211ULL, 1683062051644007ULL, 117183826129323ULL, 1894068608117440ULL,
+    3846899838975733ULL, 4289279019496192ULL, 176995887914031ULL, 78074942938713ULL,
+    454207263265292ULL, 972683614054061ULL, 808474205144361ULL, 942703935951735ULL,
+    134460241077887ULL, 2104196179349630ULL, 501632371208418ULL, 1666838991431177ULL,
+    445606193139838ULL, 73704603396096ULL, 3140284774064777ULL, 1356066420820179ULL,
+    227054159419281ULL, 1847611229198687ULL, 82327838827660ULL, 3704027573265803ULL,
+    1585260489220244ULL, 4404647914931933ULL, 2424649827425515ULL, 206821944206116ULL,
+    1508635776287972ULL, 1933584575629676ULL, 1903635423783032ULL, 4193642165165650ULL,
+    234321074690644ULL, 210406774251925ULL, 1965845668185599ULL, 3059839433804731ULL,
+    1933300510683631ULL, 150696600689211ULL, 4069293682158567ULL, 4346344602660044ULL,
+    312200249664561ULL, 2495020807621840ULL, 1912707714385ULL, 299345978159762ULL,
+    1164752722686920ULL, 225322433710338ULL, 3128747381283759ULL, 275659067815583ULL,
+    1489671057429039ULL, 1567693343342676ULL, 921672046098071ULL, 3707418899384085ULL,
+    54646424931593ULL, 4026733380127147ULL, 2933435393699231ULL, 3356593659521967ULL,
+    3637750749325529ULL, 232939412379045ULL, 2298399636043069ULL, 270361546063041ULL,
+    2523933572551420ULL, 3456896091572950ULL, 185447004732850ULL, 429322937697821ULL,
+    2579704215668222ULL, 695065378803349ULL, 3987916247731243ULL, 255159546348233ULL,
+    3057777929921282ULL, 1608970699916312ULL, 1902369623063807ULL, 1413619643652777ULL,
+    94983996321227ULL, 2832873179548050ULL, 4335430233622555ULL, 1559023976028843ULL,
+    3297181988648895ULL, 100072021232323ULL, 2124984034109675ULL, 4501252835618918ULL,
+    2053336899483297ULL, 638807226463876ULL, 278445213600634ULL, 2311236445660555ULL,
+    303317664040012ULL, 2659353858089024ULL, 3598827423980130ULL, 176059343827873ULL,
+    3891639526275437ULL, 252823982819463ULL, 3404823300622345ULL, 2758370772497456ULL,
+    91397496598783ULL, 2248661144141892ULL, 491087075271969ULL, 1786344894571315ULL,
+    452497694885923ULL, 34039628873357ULL, 2116503165025197ULL, 4436733709429923ULL,
+    3045800776819238ULL, 1385518906078375ULL, 110495603336764ULL, 4051447296249587ULL,
+    1103557421498625ULL, 1840785058439622ULL, 425322753992314ULL, 98330046771676ULL,
+    365407468686431ULL, 2611246859977123ULL, 3050253933135339ULL, 1006482220896688ULL,
+    166818196428389ULL, 3415236093104372ULL, 1762308883882288ULL, 1327828123094558ULL,
+    3403946425556706ULL, 96503464455441ULL, 3893015304031471ULL, 3740839477490397ULL,
+    2411470812852231ULL, 940927462436211ULL, 163825285911099ULL, 1622441495640386ULL,
+    850224095680266ULL, 76199085900939ULL, 1941852365144042ULL, 140326673652807ULL,
+    3161611011249524ULL, 317297150009965ULL, 2145053259340619ULL, 2180498176457552ULL,
+    38457740506224ULL, 394174899129468ULL, 2687474560485245ULL, 1542175980184516ULL,
+    1628502671124819ULL, 48477401124385ULL, 4474181600025082ULL, 2142747956365708ULL,
+    1638299432475478ULL, 2005869320353249ULL, 112292630760956ULL, 1887521965171588ULL,
+    457587531429696ULL, 840994209504042ULL, 4268060856325798ULL, 195597993440388ULL,
+    4148484749020338ULL, 2074885000909672ULL, 2309839019263165ULL, 2087616209681024ULL,
+    257214370719966ULL, 2331363508376581ULL, 1233124357504711ULL, 2849542202650296ULL,
+    3790982825325736ULL, 13381453503890ULL, 1665246594531069ULL, 4165624287443904ULL,
+    3418759698027493ULL, 2118493255117399ULL, 136249206366067ULL, 4064050233283309ULL,
+    1368779887911300ULL, 4370550759530269ULL, 66992990631341ULL, 84442368922270ULL,
+    2139322635321394ULL, 2076163483726795ULL, 657097866349103ULL, 2095579409488071ULL,
+    226525774791341ULL, 4445744257665359ULL, 2035752839278107ULL, 1998242662838304ULL,
+    1601548415521694ULL, 151297684296198ULL, 1350963039017303ULL, 2624916349548281ULL,
+    2018863259670197ULL, 2717274357461290ULL, 94024796961533ULL, 711335520409111ULL,
+    4322093765820263ULL, 2041650358174649ULL, 3439791603157577ULL, 179292018616267ULL,
+    2436436921286669ULL, 3905268797208340ULL, 2829194895162985ULL, 1355175382191543ULL,
+    55128779761539ULL, 2648428998786922ULL, 869805912573515ULL, 3706708942847864ULL,
+    2785288916584667ULL, 37156862850147ULL, 1422245336293228ULL, 4497066058933021ULL,
+    85588912978349ULL, 2616252221194611ULL, 53506393720989ULL, 3727539190732644ULL,
+    872132446545237ULL, 933583590986077ULL, 3794591170581203ULL, 167875550514069ULL,
+    2267466834993297ULL, 3072652681756816ULL, 2108499037430803ULL, 1606735192928366ULL,
+    72339568815255ULL, 3258484260684219ULL, 3277927277719855ULL, 2459560373011535ULL,
+    1672794293294033ULL, 227460934880669ULL, 3702454405413705ULL, 106168148441676ULL,
+    1356617643071159ULL, 3280896569942762ULL, 142618711614302ULL, 4291782740862057ULL,
+    4141020884874235ULL, 3720787221267125ULL, 552884940089351ULL, 174626154407180ULL,
+    972071013326540ULL, 4458530419931903ULL, 4435168973822858ULL, 1902967548748411ULL,
+    53007977605840ULL, 2453997334323925ULL, 3653077937283262ULL, 850660265046356ULL,
+    312721924805450ULL, 268503679240683ULL, 256960167714122ULL, 1474492507858350ULL,
+    2456345526438488ULL, 3686029507160255ULL, 279158933010398ULL, 3646946293948063ULL,
+    704477527214036ULL, 3387744169891031ULL, 3772622670980241ULL, 136368897543304ULL,
+    3744894052577607ULL, 1976007214443430ULL, 2090045379763451ULL, 968565474458988ULL,
+    234295114806066ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_P256_PrecompTable.h b/include/internal/Hacl_P256_PrecompTable.h
index f185c2be..c852ef8c 100644
--- a/include/internal/Hacl_P256_PrecompTable.h
+++ b/include/internal/Hacl_P256_PrecompTable.h
@@ -39,476 +39,360 @@ static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_basepoint_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)8784043285714375740U,
-    (uint64_t)8483257759279461889U, (uint64_t)8789745728267363600U, (uint64_t)1770019616739251654U,
-    (uint64_t)15992936863339206154U, (uint64_t)10037038012062884956U,
-    (uint64_t)15197544864945402661U, (uint64_t)9615747158586711429U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)10634854829044225757U, (uint64_t)351552716085025155U, (uint64_t)10645315080955407736U,
-    (uint64_t)3609262091244858135U, (uint64_t)15760741698986874125U,
-    (uint64_t)14936374388219697827U, (uint64_t)15751360096993017895U,
-    (uint64_t)18012233706239762398U, (uint64_t)1993877568177495041U,
-    (uint64_t)10345888787846536528U, (uint64_t)7746511691117935375U,
-    (uint64_t)14517043990409914413U, (uint64_t)14122549297570634151U,
-    (uint64_t)16934610359517083771U, (uint64_t)5724511325497097418U, (uint64_t)8983432969107448705U,
-    (uint64_t)2687429970334080245U, (uint64_t)16525396802810050288U, (uint64_t)7602596488871585854U,
-    (uint64_t)4813919589149203084U, (uint64_t)7680395813780804519U, (uint64_t)6687709583048023590U,
-    (uint64_t)18086445169104142027U, (uint64_t)9637814708330203929U,
-    (uint64_t)14785108459960679090U, (uint64_t)3838023279095023581U, (uint64_t)3555615526157830307U,
-    (uint64_t)5177066488380472871U, (uint64_t)18218186719108038403U,
-    (uint64_t)16281556341699656105U, (uint64_t)1524227924561461191U, (uint64_t)4148060517641909597U,
-    (uint64_t)2858290374115363433U, (uint64_t)8942772026334130620U, (uint64_t)3034451298319885113U,
-    (uint64_t)8447866036736640940U, (uint64_t)11204933433076256578U,
-    (uint64_t)18333595740249588297U, (uint64_t)8259597024804538246U, (uint64_t)9539734295777539786U,
-    (uint64_t)9797290423046626413U, (uint64_t)5777303437849646537U, (uint64_t)8739356909899132020U,
-    (uint64_t)14815960973766782158U, (uint64_t)15286581798204509801U,
-    (uint64_t)17597362577777019682U, (uint64_t)13259283710820519742U,
-    (uint64_t)10501322996899164670U, (uint64_t)1221138904338319642U,
-    (uint64_t)14586685489551951885U, (uint64_t)895326705426031212U, (uint64_t)14398171728560617847U,
-    (uint64_t)9592550823745097391U, (uint64_t)17240998489162206026U, (uint64_t)8085479283308189196U,
-    (uint64_t)14844657737893882826U, (uint64_t)15923425394150618234U,
-    (uint64_t)2997808084773249525U, (uint64_t)494323555453660587U, (uint64_t)1215695327517794764U,
-    (uint64_t)9476207381098391690U, (uint64_t)7480789678419122995U, (uint64_t)15212230329321082489U,
-    (uint64_t)436189395349576388U, (uint64_t)17377474396456660834U, (uint64_t)15237013929655017939U,
-    (uint64_t)11444428846883781676U, (uint64_t)5112749694521428575U, (uint64_t)950829367509872073U,
-    (uint64_t)17665036182057559519U, (uint64_t)17205133339690002313U,
-    (uint64_t)16233765170251334549U, (uint64_t)10122775683257972591U,
-    (uint64_t)3352514236455632420U, (uint64_t)9143148522359954691U, (uint64_t)601191684005658860U,
-    (uint64_t)13398772186646349998U, (uint64_t)15512696600132928431U,
-    (uint64_t)9128416073728948653U, (uint64_t)11233051033546138578U, (uint64_t)6769345682610122833U,
-    (uint64_t)10823233224575054288U, (uint64_t)9997725227559980175U, (uint64_t)6733425642852897415U,
-    (uint64_t)16302206918151466066U, (uint64_t)1669330822143265921U, (uint64_t)2661645605036546002U,
-    (uint64_t)17182558479745802165U, (uint64_t)1165082692376932040U, (uint64_t)9470595929011488359U,
-    (uint64_t)6142147329285324932U, (uint64_t)4829075085998111287U, (uint64_t)10231370681107338930U,
-    (uint64_t)9591876895322495239U, (uint64_t)10316468561384076618U,
-    (uint64_t)11592503647238064235U, (uint64_t)13395813606055179632U, (uint64_t)511127033980815508U,
-    (uint64_t)12434976573147649880U, (uint64_t)3425094795384359127U, (uint64_t)6816971736303023445U,
-    (uint64_t)15444670609021139344U, (uint64_t)9464349818322082360U,
-    (uint64_t)16178216413042376883U, (uint64_t)9595540370774317348U, (uint64_t)7229365182662875710U,
-    (uint64_t)4601177649460012843U, (uint64_t)5455046447382487090U, (uint64_t)10854066421606187521U,
-    (uint64_t)15913416821879788071U, (uint64_t)2297365362023460173U, (uint64_t)2603252216454941350U,
-    (uint64_t)6768791943870490934U, (uint64_t)15705936687122754810U, (uint64_t)9537096567546600694U,
-    (uint64_t)17580538144855035062U, (uint64_t)4496542856965746638U, (uint64_t)8444341625922124942U,
-    (uint64_t)12191263903636183168U, (uint64_t)17427332907535974165U,
-    (uint64_t)14307569739254103736U, (uint64_t)13900598742063266169U,
-    (uint64_t)7176996424355977650U, (uint64_t)5709008170379717479U, (uint64_t)14471312052264549092U,
-    (uint64_t)1464519909491759867U, (uint64_t)3328154641049602121U, (uint64_t)13020349337171136774U,
-    (uint64_t)2772166279972051938U, (uint64_t)10854476939425975292U, (uint64_t)1967189930534630940U,
-    (uint64_t)2802919076529341959U, (uint64_t)14792226094833519208U,
-    (uint64_t)14675640928566522177U, (uint64_t)14838974364643800837U,
-    (uint64_t)17631460696099549980U, (uint64_t)17434186275364935469U,
-    (uint64_t)2665648200587705473U, (uint64_t)13202122464492564051U, (uint64_t)7576287350918073341U,
-    (uint64_t)2272206013910186424U, (uint64_t)14558761641743937843U, (uint64_t)5675729149929979729U,
-    (uint64_t)9043135187561613166U, (uint64_t)11750149293830589225U, (uint64_t)740555197954307911U,
-    (uint64_t)9871738005087190699U, (uint64_t)17178667634283502053U,
-    (uint64_t)18046255991533013265U, (uint64_t)4458222096988430430U, (uint64_t)8452427758526311627U,
-    (uint64_t)13825286929656615266U, (uint64_t)13956286357198391218U,
-    (uint64_t)15875692916799995079U, (uint64_t)10634895319157013920U,
-    (uint64_t)13230116118036304207U, (uint64_t)8795317393614625606U, (uint64_t)7001710806858862020U,
-    (uint64_t)7949746088586183478U, (uint64_t)14677556044923602317U,
-    (uint64_t)11184023437485843904U, (uint64_t)11215864722023085094U,
-    (uint64_t)6444464081471519014U, (uint64_t)1706241174022415217U, (uint64_t)8243975633057550613U,
-    (uint64_t)15502902453836085864U, (uint64_t)3799182188594003953U, (uint64_t)3538840175098724094U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 8784043285714375740ULL, 8483257759279461889ULL, 8789745728267363600ULL,
+    1770019616739251654ULL, 15992936863339206154ULL, 10037038012062884956ULL,
+    15197544864945402661ULL, 9615747158586711429ULL, 1ULL, 18446744069414584320ULL,
+    18446744073709551615ULL, 4294967294ULL, 10634854829044225757ULL, 351552716085025155ULL,
+    10645315080955407736ULL, 3609262091244858135ULL, 15760741698986874125ULL,
+    14936374388219697827ULL, 15751360096993017895ULL, 18012233706239762398ULL,
+    1993877568177495041ULL, 10345888787846536528ULL, 7746511691117935375ULL,
+    14517043990409914413ULL, 14122549297570634151ULL, 16934610359517083771ULL,
+    5724511325497097418ULL, 8983432969107448705ULL, 2687429970334080245ULL, 16525396802810050288ULL,
+    7602596488871585854ULL, 4813919589149203084ULL, 7680395813780804519ULL, 6687709583048023590ULL,
+    18086445169104142027ULL, 9637814708330203929ULL, 14785108459960679090ULL,
+    3838023279095023581ULL, 3555615526157830307ULL, 5177066488380472871ULL, 18218186719108038403ULL,
+    16281556341699656105ULL, 1524227924561461191ULL, 4148060517641909597ULL, 2858290374115363433ULL,
+    8942772026334130620ULL, 3034451298319885113ULL, 8447866036736640940ULL, 11204933433076256578ULL,
+    18333595740249588297ULL, 8259597024804538246ULL, 9539734295777539786ULL, 9797290423046626413ULL,
+    5777303437849646537ULL, 8739356909899132020ULL, 14815960973766782158ULL,
+    15286581798204509801ULL, 17597362577777019682ULL, 13259283710820519742ULL,
+    10501322996899164670ULL, 1221138904338319642ULL, 14586685489551951885ULL, 895326705426031212ULL,
+    14398171728560617847ULL, 9592550823745097391ULL, 17240998489162206026ULL,
+    8085479283308189196ULL, 14844657737893882826ULL, 15923425394150618234ULL,
+    2997808084773249525ULL, 494323555453660587ULL, 1215695327517794764ULL, 9476207381098391690ULL,
+    7480789678419122995ULL, 15212230329321082489ULL, 436189395349576388ULL, 17377474396456660834ULL,
+    15237013929655017939ULL, 11444428846883781676ULL, 5112749694521428575ULL, 950829367509872073ULL,
+    17665036182057559519ULL, 17205133339690002313ULL, 16233765170251334549ULL,
+    10122775683257972591ULL, 3352514236455632420ULL, 9143148522359954691ULL, 601191684005658860ULL,
+    13398772186646349998ULL, 15512696600132928431ULL, 9128416073728948653ULL,
+    11233051033546138578ULL, 6769345682610122833ULL, 10823233224575054288ULL,
+    9997725227559980175ULL, 6733425642852897415ULL, 16302206918151466066ULL, 1669330822143265921ULL,
+    2661645605036546002ULL, 17182558479745802165ULL, 1165082692376932040ULL, 9470595929011488359ULL,
+    6142147329285324932ULL, 4829075085998111287ULL, 10231370681107338930ULL, 9591876895322495239ULL,
+    10316468561384076618ULL, 11592503647238064235ULL, 13395813606055179632ULL,
+    511127033980815508ULL, 12434976573147649880ULL, 3425094795384359127ULL, 6816971736303023445ULL,
+    15444670609021139344ULL, 9464349818322082360ULL, 16178216413042376883ULL,
+    9595540370774317348ULL, 7229365182662875710ULL, 4601177649460012843ULL, 5455046447382487090ULL,
+    10854066421606187521ULL, 15913416821879788071ULL, 2297365362023460173ULL,
+    2603252216454941350ULL, 6768791943870490934ULL, 15705936687122754810ULL, 9537096567546600694ULL,
+    17580538144855035062ULL, 4496542856965746638ULL, 8444341625922124942ULL,
+    12191263903636183168ULL, 17427332907535974165ULL, 14307569739254103736ULL,
+    13900598742063266169ULL, 7176996424355977650ULL, 5709008170379717479ULL,
+    14471312052264549092ULL, 1464519909491759867ULL, 3328154641049602121ULL,
+    13020349337171136774ULL, 2772166279972051938ULL, 10854476939425975292ULL,
+    1967189930534630940ULL, 2802919076529341959ULL, 14792226094833519208ULL,
+    14675640928566522177ULL, 14838974364643800837ULL, 17631460696099549980ULL,
+    17434186275364935469ULL, 2665648200587705473ULL, 13202122464492564051ULL,
+    7576287350918073341ULL, 2272206013910186424ULL, 14558761641743937843ULL, 5675729149929979729ULL,
+    9043135187561613166ULL, 11750149293830589225ULL, 740555197954307911ULL, 9871738005087190699ULL,
+    17178667634283502053ULL, 18046255991533013265ULL, 4458222096988430430ULL,
+    8452427758526311627ULL, 13825286929656615266ULL, 13956286357198391218ULL,
+    15875692916799995079ULL, 10634895319157013920ULL, 13230116118036304207ULL,
+    8795317393614625606ULL, 7001710806858862020ULL, 7949746088586183478ULL, 14677556044923602317ULL,
+    11184023437485843904ULL, 11215864722023085094ULL, 6444464081471519014ULL,
+    1706241174022415217ULL, 8243975633057550613ULL, 15502902453836085864ULL, 3799182188594003953ULL,
+    3538840175098724094ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1499621593102562565U,
-    (uint64_t)16692369783039433128U, (uint64_t)15337520135922861848U,
-    (uint64_t)5455737214495366228U, (uint64_t)17827017231032529600U,
-    (uint64_t)12413621606240782649U, (uint64_t)2290483008028286132U,
-    (uint64_t)15752017553340844820U, (uint64_t)4846430910634234874U,
-    (uint64_t)10861682798464583253U, (uint64_t)15404737222404363049U, (uint64_t)363586619281562022U,
-    (uint64_t)9866710912401645115U, (uint64_t)1162548847543228595U, (uint64_t)7649967190445130486U,
-    (uint64_t)5212340432230915749U, (uint64_t)7572620550182916491U, (uint64_t)14876145112448665096U,
-    (uint64_t)2063227348838176167U, (uint64_t)3519435548295415847U, (uint64_t)8390400282019023103U,
-    (uint64_t)17666843593163037841U, (uint64_t)9450204148816496323U, (uint64_t)8483374507652916768U,
-    (uint64_t)6254661047265818424U, (uint64_t)16382127809582285023U, (uint64_t)125359443771153172U,
-    (uint64_t)1374336701588437897U, (uint64_t)11362596098420127726U, (uint64_t)2101654420738681387U,
-    (uint64_t)12772780342444840510U, (uint64_t)12546934328908550060U,
-    (uint64_t)8331880412333790397U, (uint64_t)11687262051473819904U, (uint64_t)8926848496503457587U,
-    (uint64_t)9603974142010467857U, (uint64_t)13199952163826973175U, (uint64_t)2189856264898797734U,
-    (uint64_t)11356074861870267226U, (uint64_t)2027714896422561895U, (uint64_t)5261606367808050149U,
-    (uint64_t)153855954337762312U, (uint64_t)6375919692894573986U, (uint64_t)12364041207536146533U,
-    (uint64_t)1891896010455057160U, (uint64_t)1568123795087313171U, (uint64_t)18138710056556660101U,
-    (uint64_t)6004886947510047736U, (uint64_t)4811859325589542932U, (uint64_t)3618763430148954981U,
-    (uint64_t)11434521746258554122U, (uint64_t)10086341535864049427U,
-    (uint64_t)8073421629570399570U, (uint64_t)12680586148814729338U, (uint64_t)9619958020761569612U,
-    (uint64_t)15827203580658384478U, (uint64_t)12832694810937550406U,
-    (uint64_t)14977975484447400910U, (uint64_t)5478002389061063653U,
-    (uint64_t)14731136312639060880U, (uint64_t)4317867687275472033U, (uint64_t)6642650962855259884U,
-    (uint64_t)2514254944289495285U, (uint64_t)14231405641534478436U, (uint64_t)4045448346091518946U,
-    (uint64_t)8985477013445972471U, (uint64_t)8869039454457032149U, (uint64_t)4356978486208692970U,
-    (uint64_t)10805288613335538577U, (uint64_t)12832353127812502042U,
-    (uint64_t)4576590051676547490U, (uint64_t)6728053735138655107U, (uint64_t)17814206719173206184U,
-    (uint64_t)79790138573994940U, (uint64_t)17920293215101822267U, (uint64_t)13422026625585728864U,
-    (uint64_t)5018058010492547271U, (uint64_t)110232326023384102U, (uint64_t)10834264070056942976U,
-    (uint64_t)15222249086119088588U, (uint64_t)15119439519142044997U,
-    (uint64_t)11655511970063167313U, (uint64_t)1614477029450566107U, (uint64_t)3619322817271059794U,
-    (uint64_t)9352862040415412867U, (uint64_t)14017522553242747074U,
-    (uint64_t)13138513643674040327U, (uint64_t)3610195242889455765U, (uint64_t)8371069193996567291U,
-    (uint64_t)12670227996544662654U, (uint64_t)1205961025092146303U,
-    (uint64_t)13106709934003962112U, (uint64_t)4350113471327723407U,
-    (uint64_t)15060941403739680459U, (uint64_t)13639127647823205030U,
-    (uint64_t)10790943339357725715U, (uint64_t)498760574280648264U, (uint64_t)17922071907832082887U,
-    (uint64_t)15122670976670152145U, (uint64_t)6275027991110214322U, (uint64_t)7250912847491816402U,
-    (uint64_t)15206617260142982380U, (uint64_t)3385668313694152877U,
-    (uint64_t)17522479771766801905U, (uint64_t)2965919117476170655U, (uint64_t)1553238516603269404U,
-    (uint64_t)5820770015631050991U, (uint64_t)4999445222232605348U, (uint64_t)9245650860833717444U,
-    (uint64_t)1508811811724230728U, (uint64_t)5190684913765614385U, (uint64_t)15692927070934536166U,
-    (uint64_t)12981978499190500902U, (uint64_t)5143491963193394698U, (uint64_t)7705698092144084129U,
-    (uint64_t)581120653055084783U, (uint64_t)13886552864486459714U, (uint64_t)6290301270652587255U,
-    (uint64_t)8663431529954393128U, (uint64_t)17033405846475472443U, (uint64_t)5206780355442651635U,
-    (uint64_t)12580364474736467688U, (uint64_t)17934601912005283310U,
-    (uint64_t)15119491731028933652U, (uint64_t)17848231399859044858U,
-    (uint64_t)4427673319524919329U, (uint64_t)2673607337074368008U, (uint64_t)14034876464294699949U,
-    (uint64_t)10938948975420813697U, (uint64_t)15202340615298669183U,
-    (uint64_t)5496603454069431071U, (uint64_t)2486526142064906845U, (uint64_t)4507882119510526802U,
-    (uint64_t)13888151172411390059U, (uint64_t)15049027856908071726U,
-    (uint64_t)9667231543181973158U, (uint64_t)6406671575277563202U, (uint64_t)3395801050331215139U,
-    (uint64_t)9813607433539108308U, (uint64_t)2681417728820980381U, (uint64_t)18407064643927113994U,
-    (uint64_t)7707177692113485527U, (uint64_t)14218149384635317074U, (uint64_t)3658668346206375919U,
-    (uint64_t)15404713991002362166U, (uint64_t)10152074687696195207U,
-    (uint64_t)10926946599582128139U, (uint64_t)16907298600007085320U,
-    (uint64_t)16544287219664720279U, (uint64_t)11007075933432813205U,
-    (uint64_t)8652245965145713599U, (uint64_t)7857626748965990384U, (uint64_t)5602306604520095870U,
-    (uint64_t)2525139243938658618U, (uint64_t)14405696176872077447U,
-    (uint64_t)18432270482137885332U, (uint64_t)9913880809120071177U,
-    (uint64_t)16896141737831216972U, (uint64_t)7484791498211214829U,
-    (uint64_t)15635259968266497469U, (uint64_t)8495118537612215624U, (uint64_t)4915477980562575356U,
-    (uint64_t)16453519279754924350U, (uint64_t)14462108244565406969U,
-    (uint64_t)14837837755237096687U, (uint64_t)14130171078892575346U,
-    (uint64_t)15423793222528491497U, (uint64_t)5460399262075036084U,
-    (uint64_t)16085440580308415349U, (uint64_t)26873200736954488U, (uint64_t)5603655807457499550U,
-    (uint64_t)3342202915871129617U, (uint64_t)1604413932150236626U, (uint64_t)9684226585089458974U,
-    (uint64_t)1213229904006618539U, (uint64_t)6782978662408837236U, (uint64_t)11197029877749307372U,
-    (uint64_t)14085968786551657744U, (uint64_t)17352273610494009342U,
-    (uint64_t)7876582961192434984U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1499621593102562565ULL, 16692369783039433128ULL,
+    15337520135922861848ULL, 5455737214495366228ULL, 17827017231032529600ULL,
+    12413621606240782649ULL, 2290483008028286132ULL, 15752017553340844820ULL,
+    4846430910634234874ULL, 10861682798464583253ULL, 15404737222404363049ULL, 363586619281562022ULL,
+    9866710912401645115ULL, 1162548847543228595ULL, 7649967190445130486ULL, 5212340432230915749ULL,
+    7572620550182916491ULL, 14876145112448665096ULL, 2063227348838176167ULL, 3519435548295415847ULL,
+    8390400282019023103ULL, 17666843593163037841ULL, 9450204148816496323ULL, 8483374507652916768ULL,
+    6254661047265818424ULL, 16382127809582285023ULL, 125359443771153172ULL, 1374336701588437897ULL,
+    11362596098420127726ULL, 2101654420738681387ULL, 12772780342444840510ULL,
+    12546934328908550060ULL, 8331880412333790397ULL, 11687262051473819904ULL,
+    8926848496503457587ULL, 9603974142010467857ULL, 13199952163826973175ULL, 2189856264898797734ULL,
+    11356074861870267226ULL, 2027714896422561895ULL, 5261606367808050149ULL, 153855954337762312ULL,
+    6375919692894573986ULL, 12364041207536146533ULL, 1891896010455057160ULL, 1568123795087313171ULL,
+    18138710056556660101ULL, 6004886947510047736ULL, 4811859325589542932ULL, 3618763430148954981ULL,
+    11434521746258554122ULL, 10086341535864049427ULL, 8073421629570399570ULL,
+    12680586148814729338ULL, 9619958020761569612ULL, 15827203580658384478ULL,
+    12832694810937550406ULL, 14977975484447400910ULL, 5478002389061063653ULL,
+    14731136312639060880ULL, 4317867687275472033ULL, 6642650962855259884ULL, 2514254944289495285ULL,
+    14231405641534478436ULL, 4045448346091518946ULL, 8985477013445972471ULL, 8869039454457032149ULL,
+    4356978486208692970ULL, 10805288613335538577ULL, 12832353127812502042ULL,
+    4576590051676547490ULL, 6728053735138655107ULL, 17814206719173206184ULL, 79790138573994940ULL,
+    17920293215101822267ULL, 13422026625585728864ULL, 5018058010492547271ULL, 110232326023384102ULL,
+    10834264070056942976ULL, 15222249086119088588ULL, 15119439519142044997ULL,
+    11655511970063167313ULL, 1614477029450566107ULL, 3619322817271059794ULL, 9352862040415412867ULL,
+    14017522553242747074ULL, 13138513643674040327ULL, 3610195242889455765ULL,
+    8371069193996567291ULL, 12670227996544662654ULL, 1205961025092146303ULL,
+    13106709934003962112ULL, 4350113471327723407ULL, 15060941403739680459ULL,
+    13639127647823205030ULL, 10790943339357725715ULL, 498760574280648264ULL,
+    17922071907832082887ULL, 15122670976670152145ULL, 6275027991110214322ULL,
+    7250912847491816402ULL, 15206617260142982380ULL, 3385668313694152877ULL,
+    17522479771766801905ULL, 2965919117476170655ULL, 1553238516603269404ULL, 5820770015631050991ULL,
+    4999445222232605348ULL, 9245650860833717444ULL, 1508811811724230728ULL, 5190684913765614385ULL,
+    15692927070934536166ULL, 12981978499190500902ULL, 5143491963193394698ULL,
+    7705698092144084129ULL, 581120653055084783ULL, 13886552864486459714ULL, 6290301270652587255ULL,
+    8663431529954393128ULL, 17033405846475472443ULL, 5206780355442651635ULL,
+    12580364474736467688ULL, 17934601912005283310ULL, 15119491731028933652ULL,
+    17848231399859044858ULL, 4427673319524919329ULL, 2673607337074368008ULL,
+    14034876464294699949ULL, 10938948975420813697ULL, 15202340615298669183ULL,
+    5496603454069431071ULL, 2486526142064906845ULL, 4507882119510526802ULL, 13888151172411390059ULL,
+    15049027856908071726ULL, 9667231543181973158ULL, 6406671575277563202ULL, 3395801050331215139ULL,
+    9813607433539108308ULL, 2681417728820980381ULL, 18407064643927113994ULL, 7707177692113485527ULL,
+    14218149384635317074ULL, 3658668346206375919ULL, 15404713991002362166ULL,
+    10152074687696195207ULL, 10926946599582128139ULL, 16907298600007085320ULL,
+    16544287219664720279ULL, 11007075933432813205ULL, 8652245965145713599ULL,
+    7857626748965990384ULL, 5602306604520095870ULL, 2525139243938658618ULL, 14405696176872077447ULL,
+    18432270482137885332ULL, 9913880809120071177ULL, 16896141737831216972ULL,
+    7484791498211214829ULL, 15635259968266497469ULL, 8495118537612215624ULL, 4915477980562575356ULL,
+    16453519279754924350ULL, 14462108244565406969ULL, 14837837755237096687ULL,
+    14130171078892575346ULL, 15423793222528491497ULL, 5460399262075036084ULL,
+    16085440580308415349ULL, 26873200736954488ULL, 5603655807457499550ULL, 3342202915871129617ULL,
+    1604413932150236626ULL, 9684226585089458974ULL, 1213229904006618539ULL, 6782978662408837236ULL,
+    11197029877749307372ULL, 14085968786551657744ULL, 17352273610494009342ULL,
+    7876582961192434984ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)14619254753077084366U,
-    (uint64_t)13913835116514008593U, (uint64_t)15060744674088488145U,
-    (uint64_t)17668414598203068685U, (uint64_t)10761169236902342334U,
-    (uint64_t)15467027479157446221U, (uint64_t)14989185522423469618U,
-    (uint64_t)14354539272510107003U, (uint64_t)14298211796392133693U,
-    (uint64_t)13270323784253711450U, (uint64_t)13380964971965046957U,
-    (uint64_t)8686204248456909699U, (uint64_t)17434630286744937066U, (uint64_t)1355903775279084720U,
-    (uint64_t)7554695053550308662U, (uint64_t)11354971222741863570U, (uint64_t)564601613420749879U,
-    (uint64_t)8466325837259054896U, (uint64_t)10752965181772434263U,
-    (uint64_t)11405876547368426319U, (uint64_t)13791894568738930940U,
-    (uint64_t)8230587134406354675U, (uint64_t)12415514098722758608U,
-    (uint64_t)18414183046995786744U, (uint64_t)15508000368227372870U,
-    (uint64_t)5781062464627999307U, (uint64_t)15339429052219195590U,
-    (uint64_t)16038703753810741903U, (uint64_t)9587718938298980714U, (uint64_t)4822658817952386407U,
-    (uint64_t)1376351024833260660U, (uint64_t)1120174910554766702U, (uint64_t)1730170933262569274U,
-    (uint64_t)5187428548444533500U, (uint64_t)16242053503368957131U, (uint64_t)3036811119519868279U,
-    (uint64_t)1760267587958926638U, (uint64_t)170244572981065185U, (uint64_t)8063080791967388171U,
-    (uint64_t)4824892826607692737U, (uint64_t)16286391083472040552U,
-    (uint64_t)11945158615253358747U, (uint64_t)14096887760410224200U,
-    (uint64_t)1613720831904557039U, (uint64_t)14316966673761197523U,
-    (uint64_t)17411006201485445341U, (uint64_t)8112301506943158801U, (uint64_t)2069889233927989984U,
-    (uint64_t)10082848378277483927U, (uint64_t)3609691194454404430U, (uint64_t)6110437205371933689U,
-    (uint64_t)9769135977342231601U, (uint64_t)11977962151783386478U,
-    (uint64_t)18088718692559983573U, (uint64_t)11741637975753055U, (uint64_t)11110390325701582190U,
-    (uint64_t)1341402251566067019U, (uint64_t)3028229550849726478U, (uint64_t)10438984083997451310U,
-    (uint64_t)12730851885100145709U, (uint64_t)11524169532089894189U,
-    (uint64_t)4523375903229602674U, (uint64_t)2028602258037385622U, (uint64_t)17082839063089388410U,
-    (uint64_t)6103921364634113167U, (uint64_t)17066180888225306102U,
-    (uint64_t)11395680486707876195U, (uint64_t)10952892272443345484U,
-    (uint64_t)8792831960605859401U, (uint64_t)14194485427742325139U,
-    (uint64_t)15146020821144305250U, (uint64_t)1654766014957123343U, (uint64_t)7955526243090948551U,
-    (uint64_t)3989277566080493308U, (uint64_t)12229385116397931231U,
-    (uint64_t)13430548930727025562U, (uint64_t)3434892688179800602U, (uint64_t)8431998794645622027U,
-    (uint64_t)12132530981596299272U, (uint64_t)2289461608863966999U,
-    (uint64_t)18345870950201487179U, (uint64_t)13517947207801901576U,
-    (uint64_t)5213113244172561159U, (uint64_t)17632986594098340879U, (uint64_t)4405251818133148856U,
-    (uint64_t)11783009269435447793U, (uint64_t)9332138983770046035U,
-    (uint64_t)12863411548922539505U, (uint64_t)3717030292816178224U,
-    (uint64_t)10026078446427137374U, (uint64_t)11167295326594317220U,
-    (uint64_t)12425328773141588668U, (uint64_t)5760335125172049352U, (uint64_t)9016843701117277863U,
-    (uint64_t)5657892835694680172U, (uint64_t)11025130589305387464U, (uint64_t)1368484957977406173U,
-    (uint64_t)17361351345281258834U, (uint64_t)1907113641956152700U,
-    (uint64_t)16439233413531427752U, (uint64_t)5893322296986588932U,
-    (uint64_t)14000206906171746627U, (uint64_t)14979266987545792900U,
-    (uint64_t)6926291766898221120U, (uint64_t)7162023296083360752U, (uint64_t)14762747553625382529U,
-    (uint64_t)12610831658612406849U, (uint64_t)10462926899548715515U,
-    (uint64_t)4794017723140405312U, (uint64_t)5234438200490163319U, (uint64_t)8019519110339576320U,
-    (uint64_t)7194604241290530100U, (uint64_t)12626770134810813246U,
-    (uint64_t)10793074474236419890U, (uint64_t)11323224347913978783U,
-    (uint64_t)16831128015895380245U, (uint64_t)18323094195124693378U,
-    (uint64_t)2361097165281567692U, (uint64_t)15755578675014279498U,
-    (uint64_t)14289876470325854580U, (uint64_t)12856787656093616839U,
-    (uint64_t)3578928531243900594U, (uint64_t)3847532758790503699U, (uint64_t)8377953190224748743U,
-    (uint64_t)3314546646092744596U, (uint64_t)800810188859334358U, (uint64_t)4626344124229343596U,
-    (uint64_t)6620381605850876621U, (uint64_t)11422073570955989527U,
-    (uint64_t)12676813626484814469U, (uint64_t)16725029886764122240U,
-    (uint64_t)16648497372773830008U, (uint64_t)9135702594931291048U,
-    (uint64_t)16080949688826680333U, (uint64_t)11528096561346602947U,
-    (uint64_t)2632498067099740984U, (uint64_t)11583842699108800714U, (uint64_t)8378404864573610526U,
-    (uint64_t)1076560261627788534U, (uint64_t)13836015994325032828U,
-    (uint64_t)11234295937817067909U, (uint64_t)5893659808396722708U,
-    (uint64_t)11277421142886984364U, (uint64_t)8968549037166726491U,
-    (uint64_t)14841374331394032822U, (uint64_t)9967344773947889341U, (uint64_t)8799244393578496085U,
-    (uint64_t)5094686877301601410U, (uint64_t)8780316747074726862U, (uint64_t)9119697306829835718U,
-    (uint64_t)15381243327921855368U, (uint64_t)2686250164449435196U,
-    (uint64_t)16466917280442198358U, (uint64_t)13791704489163125216U,
-    (uint64_t)16955859337117924272U, (uint64_t)17112836394923783642U,
-    (uint64_t)4639176427338618063U, (uint64_t)16770029310141094964U,
-    (uint64_t)11049953922966416185U, (uint64_t)12012669590884098968U,
-    (uint64_t)4859326885929417214U, (uint64_t)896380084392586061U, (uint64_t)7153028362977034008U,
-    (uint64_t)10540021163316263301U, (uint64_t)9318277998512936585U,
-    (uint64_t)18344496977694796523U, (uint64_t)11374737400567645494U,
-    (uint64_t)17158800051138212954U, (uint64_t)18343197867863253153U,
-    (uint64_t)18204799297967861226U, (uint64_t)15798973531606348828U,
-    (uint64_t)9870158263408310459U, (uint64_t)17578869832774612627U, (uint64_t)8395748875822696932U,
-    (uint64_t)15310679007370670872U, (uint64_t)11205576736030808860U,
-    (uint64_t)10123429210002838967U, (uint64_t)5910544144088393959U,
-    (uint64_t)14016615653353687369U, (uint64_t)11191676704772957822U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 14619254753077084366ULL, 13913835116514008593ULL,
+    15060744674088488145ULL, 17668414598203068685ULL, 10761169236902342334ULL,
+    15467027479157446221ULL, 14989185522423469618ULL, 14354539272510107003ULL,
+    14298211796392133693ULL, 13270323784253711450ULL, 13380964971965046957ULL,
+    8686204248456909699ULL, 17434630286744937066ULL, 1355903775279084720ULL, 7554695053550308662ULL,
+    11354971222741863570ULL, 564601613420749879ULL, 8466325837259054896ULL, 10752965181772434263ULL,
+    11405876547368426319ULL, 13791894568738930940ULL, 8230587134406354675ULL,
+    12415514098722758608ULL, 18414183046995786744ULL, 15508000368227372870ULL,
+    5781062464627999307ULL, 15339429052219195590ULL, 16038703753810741903ULL,
+    9587718938298980714ULL, 4822658817952386407ULL, 1376351024833260660ULL, 1120174910554766702ULL,
+    1730170933262569274ULL, 5187428548444533500ULL, 16242053503368957131ULL, 3036811119519868279ULL,
+    1760267587958926638ULL, 170244572981065185ULL, 8063080791967388171ULL, 4824892826607692737ULL,
+    16286391083472040552ULL, 11945158615253358747ULL, 14096887760410224200ULL,
+    1613720831904557039ULL, 14316966673761197523ULL, 17411006201485445341ULL,
+    8112301506943158801ULL, 2069889233927989984ULL, 10082848378277483927ULL, 3609691194454404430ULL,
+    6110437205371933689ULL, 9769135977342231601ULL, 11977962151783386478ULL,
+    18088718692559983573ULL, 11741637975753055ULL, 11110390325701582190ULL, 1341402251566067019ULL,
+    3028229550849726478ULL, 10438984083997451310ULL, 12730851885100145709ULL,
+    11524169532089894189ULL, 4523375903229602674ULL, 2028602258037385622ULL,
+    17082839063089388410ULL, 6103921364634113167ULL, 17066180888225306102ULL,
+    11395680486707876195ULL, 10952892272443345484ULL, 8792831960605859401ULL,
+    14194485427742325139ULL, 15146020821144305250ULL, 1654766014957123343ULL,
+    7955526243090948551ULL, 3989277566080493308ULL, 12229385116397931231ULL,
+    13430548930727025562ULL, 3434892688179800602ULL, 8431998794645622027ULL,
+    12132530981596299272ULL, 2289461608863966999ULL, 18345870950201487179ULL,
+    13517947207801901576ULL, 5213113244172561159ULL, 17632986594098340879ULL,
+    4405251818133148856ULL, 11783009269435447793ULL, 9332138983770046035ULL,
+    12863411548922539505ULL, 3717030292816178224ULL, 10026078446427137374ULL,
+    11167295326594317220ULL, 12425328773141588668ULL, 5760335125172049352ULL,
+    9016843701117277863ULL, 5657892835694680172ULL, 11025130589305387464ULL, 1368484957977406173ULL,
+    17361351345281258834ULL, 1907113641956152700ULL, 16439233413531427752ULL,
+    5893322296986588932ULL, 14000206906171746627ULL, 14979266987545792900ULL,
+    6926291766898221120ULL, 7162023296083360752ULL, 14762747553625382529ULL,
+    12610831658612406849ULL, 10462926899548715515ULL, 4794017723140405312ULL,
+    5234438200490163319ULL, 8019519110339576320ULL, 7194604241290530100ULL, 12626770134810813246ULL,
+    10793074474236419890ULL, 11323224347913978783ULL, 16831128015895380245ULL,
+    18323094195124693378ULL, 2361097165281567692ULL, 15755578675014279498ULL,
+    14289876470325854580ULL, 12856787656093616839ULL, 3578928531243900594ULL,
+    3847532758790503699ULL, 8377953190224748743ULL, 3314546646092744596ULL, 800810188859334358ULL,
+    4626344124229343596ULL, 6620381605850876621ULL, 11422073570955989527ULL,
+    12676813626484814469ULL, 16725029886764122240ULL, 16648497372773830008ULL,
+    9135702594931291048ULL, 16080949688826680333ULL, 11528096561346602947ULL,
+    2632498067099740984ULL, 11583842699108800714ULL, 8378404864573610526ULL, 1076560261627788534ULL,
+    13836015994325032828ULL, 11234295937817067909ULL, 5893659808396722708ULL,
+    11277421142886984364ULL, 8968549037166726491ULL, 14841374331394032822ULL,
+    9967344773947889341ULL, 8799244393578496085ULL, 5094686877301601410ULL, 8780316747074726862ULL,
+    9119697306829835718ULL, 15381243327921855368ULL, 2686250164449435196ULL,
+    16466917280442198358ULL, 13791704489163125216ULL, 16955859337117924272ULL,
+    17112836394923783642ULL, 4639176427338618063ULL, 16770029310141094964ULL,
+    11049953922966416185ULL, 12012669590884098968ULL, 4859326885929417214ULL, 896380084392586061ULL,
+    7153028362977034008ULL, 10540021163316263301ULL, 9318277998512936585ULL,
+    18344496977694796523ULL, 11374737400567645494ULL, 17158800051138212954ULL,
+    18343197867863253153ULL, 18204799297967861226ULL, 15798973531606348828ULL,
+    9870158263408310459ULL, 17578869832774612627ULL, 8395748875822696932ULL,
+    15310679007370670872ULL, 11205576736030808860ULL, 10123429210002838967ULL,
+    5910544144088393959ULL, 14016615653353687369ULL, 11191676704772957822ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)7870395003430845958U,
-    (uint64_t)18001862936410067720U, (uint64_t)8006461232116967215U, (uint64_t)5921313779532424762U,
-    (uint64_t)10702113371959864307U, (uint64_t)8070517410642379879U, (uint64_t)7139806720777708306U,
-    (uint64_t)8253938546650739833U, (uint64_t)17490482834545705718U, (uint64_t)1065249776797037500U,
-    (uint64_t)5018258455937968775U, (uint64_t)14100621120178668337U, (uint64_t)8392845221328116213U,
-    (uint64_t)14630296398338540788U, (uint64_t)4268947906723414372U, (uint64_t)9231207002243517909U,
-    (uint64_t)14261219637616504262U, (uint64_t)7786881626982345356U,
-    (uint64_t)11412720751765882139U, (uint64_t)14119585051365330009U,
-    (uint64_t)15281626286521302128U, (uint64_t)6350171933454266732U,
-    (uint64_t)16559468304937127866U, (uint64_t)13200760478271693417U,
-    (uint64_t)6733381546280350776U, (uint64_t)3801404890075189193U, (uint64_t)2741036364686993903U,
-    (uint64_t)3218612940540174008U, (uint64_t)10894914335165419505U,
-    (uint64_t)11862941430149998362U, (uint64_t)4223151729402839584U, (uint64_t)2913215088487087887U,
-    (uint64_t)14562168920104952953U, (uint64_t)2170089393468287453U,
-    (uint64_t)10520900655016579352U, (uint64_t)7040362608949989273U, (uint64_t)8376510559381705307U,
-    (uint64_t)9142237200448131532U, (uint64_t)5696859948123854080U, (uint64_t)925422306716081180U,
-    (uint64_t)11155545953469186421U, (uint64_t)1888208646862572812U,
-    (uint64_t)11151095998248845721U, (uint64_t)15793503271680275267U,
-    (uint64_t)7729877044494854851U, (uint64_t)6235134673193032913U, (uint64_t)7364280682182401564U,
-    (uint64_t)5479679373325519985U, (uint64_t)17966037684582301763U,
-    (uint64_t)14140891609330279185U, (uint64_t)5814744449740463867U, (uint64_t)5652588426712591652U,
-    (uint64_t)774745682988690912U, (uint64_t)13228255573220500373U, (uint64_t)11949122068786859397U,
-    (uint64_t)8021166392900770376U, (uint64_t)7994323710948720063U, (uint64_t)9924618472877849977U,
-    (uint64_t)17618517523141194266U, (uint64_t)2750424097794401714U,
-    (uint64_t)15481749570715253207U, (uint64_t)14646964509921760497U,
-    (uint64_t)1037442848094301355U, (uint64_t)6295995947389299132U, (uint64_t)16915049722317579514U,
-    (uint64_t)10493877400992990313U, (uint64_t)18391008753060553521U, (uint64_t)483942209623707598U,
-    (uint64_t)2017775662838016613U, (uint64_t)5933251998459363553U, (uint64_t)11789135019970707407U,
-    (uint64_t)5484123723153268336U, (uint64_t)13246954648848484954U, (uint64_t)4774374393926023505U,
-    (uint64_t)14863995618704457336U, (uint64_t)13220153167104973625U,
-    (uint64_t)5988445485312390826U, (uint64_t)17580359464028944682U, (uint64_t)7297100131969874771U,
-    (uint64_t)379931507867989375U, (uint64_t)10927113096513421444U, (uint64_t)17688881974428340857U,
-    (uint64_t)4259872578781463333U, (uint64_t)8573076295966784472U, (uint64_t)16389829450727275032U,
-    (uint64_t)1667243868963568259U, (uint64_t)17730726848925960919U,
-    (uint64_t)11408899874569778008U, (uint64_t)3576527582023272268U,
-    (uint64_t)16492920640224231656U, (uint64_t)7906130545972460130U,
-    (uint64_t)13878604278207681266U, (uint64_t)41446695125652041U, (uint64_t)8891615271337333503U,
-    (uint64_t)2594537723613594470U, (uint64_t)7699579176995770924U, (uint64_t)147458463055730655U,
-    (uint64_t)12120406862739088406U, (uint64_t)12044892493010567063U,
-    (uint64_t)8554076749615475136U, (uint64_t)1005097692260929999U, (uint64_t)2687202654471188715U,
-    (uint64_t)9457588752176879209U, (uint64_t)17472884880062444019U, (uint64_t)9792097892056020166U,
-    (uint64_t)2525246678512797150U, (uint64_t)15958903035313115662U,
-    (uint64_t)11336038170342247032U, (uint64_t)11560342382835141123U,
-    (uint64_t)6212009033479929024U, (uint64_t)8214308203775021229U, (uint64_t)8475469210070503698U,
-    (uint64_t)13287024123485719563U, (uint64_t)12956951963817520723U,
-    (uint64_t)10693035819908470465U, (uint64_t)11375478788224786725U,
-    (uint64_t)16934625208487120398U, (uint64_t)10094585729115874495U,
-    (uint64_t)2763884524395905776U, (uint64_t)13535890148969964883U,
-    (uint64_t)13514657411765064358U, (uint64_t)9903074440788027562U,
-    (uint64_t)17324720726421199990U, (uint64_t)2273931039117368789U, (uint64_t)3442641041506157854U,
-    (uint64_t)1119853641236409612U, (uint64_t)12037070344296077989U, (uint64_t)581736433335671746U,
-    (uint64_t)6019150647054369174U, (uint64_t)14864096138068789375U, (uint64_t)6652995210998318662U,
-    (uint64_t)12773883697029175304U, (uint64_t)12751275631451845119U,
-    (uint64_t)11449095003038250478U, (uint64_t)1025805267334366480U, (uint64_t)2764432500300815015U,
-    (uint64_t)18274564429002844381U, (uint64_t)10445634195592600351U,
-    (uint64_t)11814099592837202735U, (uint64_t)5006796893679120289U, (uint64_t)6908397253997261914U,
-    (uint64_t)13266696965302879279U, (uint64_t)7768715053015037430U, (uint64_t)3569923738654785686U,
-    (uint64_t)5844853453464857549U, (uint64_t)1837340805629559110U, (uint64_t)1034657624388283114U,
-    (uint64_t)711244516069456460U, (uint64_t)12519286026957934814U, (uint64_t)2613464944620837619U,
-    (uint64_t)10003023321338286213U, (uint64_t)7291332092642881376U, (uint64_t)9832199564117004897U,
-    (uint64_t)3280736694860799890U, (uint64_t)6416452202849179874U, (uint64_t)7326961381798642069U,
-    (uint64_t)8435688798040635029U, (uint64_t)16630141263910982958U,
-    (uint64_t)17222635514422533318U, (uint64_t)9482787389178881499U, (uint64_t)836561194658263905U,
-    (uint64_t)3405319043337616649U, (uint64_t)2786146577568026518U, (uint64_t)7625483685691626321U,
-    (uint64_t)6728084875304656716U, (uint64_t)1140997959232544268U, (uint64_t)12847384827606303792U,
-    (uint64_t)1719121337754572070U, (uint64_t)12863589482936438532U, (uint64_t)3880712899640530862U,
-    (uint64_t)2748456882813671564U, (uint64_t)4775988900044623019U, (uint64_t)8937847374382191162U,
-    (uint64_t)3767367347172252295U, (uint64_t)13468672401049388646U,
-    (uint64_t)14359032216842397576U, (uint64_t)2002555958685443975U,
-    (uint64_t)16488678606651526810U, (uint64_t)11826135409597474760U,
-    (uint64_t)15296495673182508601U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 7870395003430845958ULL, 18001862936410067720ULL, 8006461232116967215ULL,
+    5921313779532424762ULL, 10702113371959864307ULL, 8070517410642379879ULL, 7139806720777708306ULL,
+    8253938546650739833ULL, 17490482834545705718ULL, 1065249776797037500ULL, 5018258455937968775ULL,
+    14100621120178668337ULL, 8392845221328116213ULL, 14630296398338540788ULL,
+    4268947906723414372ULL, 9231207002243517909ULL, 14261219637616504262ULL, 7786881626982345356ULL,
+    11412720751765882139ULL, 14119585051365330009ULL, 15281626286521302128ULL,
+    6350171933454266732ULL, 16559468304937127866ULL, 13200760478271693417ULL,
+    6733381546280350776ULL, 3801404890075189193ULL, 2741036364686993903ULL, 3218612940540174008ULL,
+    10894914335165419505ULL, 11862941430149998362ULL, 4223151729402839584ULL,
+    2913215088487087887ULL, 14562168920104952953ULL, 2170089393468287453ULL,
+    10520900655016579352ULL, 7040362608949989273ULL, 8376510559381705307ULL, 9142237200448131532ULL,
+    5696859948123854080ULL, 925422306716081180ULL, 11155545953469186421ULL, 1888208646862572812ULL,
+    11151095998248845721ULL, 15793503271680275267ULL, 7729877044494854851ULL,
+    6235134673193032913ULL, 7364280682182401564ULL, 5479679373325519985ULL, 17966037684582301763ULL,
+    14140891609330279185ULL, 5814744449740463867ULL, 5652588426712591652ULL, 774745682988690912ULL,
+    13228255573220500373ULL, 11949122068786859397ULL, 8021166392900770376ULL,
+    7994323710948720063ULL, 9924618472877849977ULL, 17618517523141194266ULL, 2750424097794401714ULL,
+    15481749570715253207ULL, 14646964509921760497ULL, 1037442848094301355ULL,
+    6295995947389299132ULL, 16915049722317579514ULL, 10493877400992990313ULL,
+    18391008753060553521ULL, 483942209623707598ULL, 2017775662838016613ULL, 5933251998459363553ULL,
+    11789135019970707407ULL, 5484123723153268336ULL, 13246954648848484954ULL,
+    4774374393926023505ULL, 14863995618704457336ULL, 13220153167104973625ULL,
+    5988445485312390826ULL, 17580359464028944682ULL, 7297100131969874771ULL, 379931507867989375ULL,
+    10927113096513421444ULL, 17688881974428340857ULL, 4259872578781463333ULL,
+    8573076295966784472ULL, 16389829450727275032ULL, 1667243868963568259ULL,
+    17730726848925960919ULL, 11408899874569778008ULL, 3576527582023272268ULL,
+    16492920640224231656ULL, 7906130545972460130ULL, 13878604278207681266ULL, 41446695125652041ULL,
+    8891615271337333503ULL, 2594537723613594470ULL, 7699579176995770924ULL, 147458463055730655ULL,
+    12120406862739088406ULL, 12044892493010567063ULL, 8554076749615475136ULL,
+    1005097692260929999ULL, 2687202654471188715ULL, 9457588752176879209ULL, 17472884880062444019ULL,
+    9792097892056020166ULL, 2525246678512797150ULL, 15958903035313115662ULL,
+    11336038170342247032ULL, 11560342382835141123ULL, 6212009033479929024ULL,
+    8214308203775021229ULL, 8475469210070503698ULL, 13287024123485719563ULL,
+    12956951963817520723ULL, 10693035819908470465ULL, 11375478788224786725ULL,
+    16934625208487120398ULL, 10094585729115874495ULL, 2763884524395905776ULL,
+    13535890148969964883ULL, 13514657411765064358ULL, 9903074440788027562ULL,
+    17324720726421199990ULL, 2273931039117368789ULL, 3442641041506157854ULL, 1119853641236409612ULL,
+    12037070344296077989ULL, 581736433335671746ULL, 6019150647054369174ULL, 14864096138068789375ULL,
+    6652995210998318662ULL, 12773883697029175304ULL, 12751275631451845119ULL,
+    11449095003038250478ULL, 1025805267334366480ULL, 2764432500300815015ULL,
+    18274564429002844381ULL, 10445634195592600351ULL, 11814099592837202735ULL,
+    5006796893679120289ULL, 6908397253997261914ULL, 13266696965302879279ULL, 7768715053015037430ULL,
+    3569923738654785686ULL, 5844853453464857549ULL, 1837340805629559110ULL, 1034657624388283114ULL,
+    711244516069456460ULL, 12519286026957934814ULL, 2613464944620837619ULL, 10003023321338286213ULL,
+    7291332092642881376ULL, 9832199564117004897ULL, 3280736694860799890ULL, 6416452202849179874ULL,
+    7326961381798642069ULL, 8435688798040635029ULL, 16630141263910982958ULL,
+    17222635514422533318ULL, 9482787389178881499ULL, 836561194658263905ULL, 3405319043337616649ULL,
+    2786146577568026518ULL, 7625483685691626321ULL, 6728084875304656716ULL, 1140997959232544268ULL,
+    12847384827606303792ULL, 1719121337754572070ULL, 12863589482936438532ULL,
+    3880712899640530862ULL, 2748456882813671564ULL, 4775988900044623019ULL, 8937847374382191162ULL,
+    3767367347172252295ULL, 13468672401049388646ULL, 14359032216842397576ULL,
+    2002555958685443975ULL, 16488678606651526810ULL, 11826135409597474760ULL,
+    15296495673182508601ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_basepoint_table_w5[384U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)8784043285714375740U,
-    (uint64_t)8483257759279461889U, (uint64_t)8789745728267363600U, (uint64_t)1770019616739251654U,
-    (uint64_t)15992936863339206154U, (uint64_t)10037038012062884956U,
-    (uint64_t)15197544864945402661U, (uint64_t)9615747158586711429U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)10634854829044225757U, (uint64_t)351552716085025155U, (uint64_t)10645315080955407736U,
-    (uint64_t)3609262091244858135U, (uint64_t)15760741698986874125U,
-    (uint64_t)14936374388219697827U, (uint64_t)15751360096993017895U,
-    (uint64_t)18012233706239762398U, (uint64_t)1993877568177495041U,
-    (uint64_t)10345888787846536528U, (uint64_t)7746511691117935375U,
-    (uint64_t)14517043990409914413U, (uint64_t)14122549297570634151U,
-    (uint64_t)16934610359517083771U, (uint64_t)5724511325497097418U, (uint64_t)8983432969107448705U,
-    (uint64_t)2687429970334080245U, (uint64_t)16525396802810050288U, (uint64_t)7602596488871585854U,
-    (uint64_t)4813919589149203084U, (uint64_t)7680395813780804519U, (uint64_t)6687709583048023590U,
-    (uint64_t)18086445169104142027U, (uint64_t)9637814708330203929U,
-    (uint64_t)14785108459960679090U, (uint64_t)3838023279095023581U, (uint64_t)3555615526157830307U,
-    (uint64_t)5177066488380472871U, (uint64_t)18218186719108038403U,
-    (uint64_t)16281556341699656105U, (uint64_t)1524227924561461191U, (uint64_t)4148060517641909597U,
-    (uint64_t)2858290374115363433U, (uint64_t)8942772026334130620U, (uint64_t)3034451298319885113U,
-    (uint64_t)8447866036736640940U, (uint64_t)11204933433076256578U,
-    (uint64_t)18333595740249588297U, (uint64_t)8259597024804538246U, (uint64_t)9539734295777539786U,
-    (uint64_t)9797290423046626413U, (uint64_t)5777303437849646537U, (uint64_t)8739356909899132020U,
-    (uint64_t)14815960973766782158U, (uint64_t)15286581798204509801U,
-    (uint64_t)17597362577777019682U, (uint64_t)13259283710820519742U,
-    (uint64_t)10501322996899164670U, (uint64_t)1221138904338319642U,
-    (uint64_t)14586685489551951885U, (uint64_t)895326705426031212U, (uint64_t)14398171728560617847U,
-    (uint64_t)9592550823745097391U, (uint64_t)17240998489162206026U, (uint64_t)8085479283308189196U,
-    (uint64_t)14844657737893882826U, (uint64_t)15923425394150618234U,
-    (uint64_t)2997808084773249525U, (uint64_t)494323555453660587U, (uint64_t)1215695327517794764U,
-    (uint64_t)9476207381098391690U, (uint64_t)7480789678419122995U, (uint64_t)15212230329321082489U,
-    (uint64_t)436189395349576388U, (uint64_t)17377474396456660834U, (uint64_t)15237013929655017939U,
-    (uint64_t)11444428846883781676U, (uint64_t)5112749694521428575U, (uint64_t)950829367509872073U,
-    (uint64_t)17665036182057559519U, (uint64_t)17205133339690002313U,
-    (uint64_t)16233765170251334549U, (uint64_t)10122775683257972591U,
-    (uint64_t)3352514236455632420U, (uint64_t)9143148522359954691U, (uint64_t)601191684005658860U,
-    (uint64_t)13398772186646349998U, (uint64_t)15512696600132928431U,
-    (uint64_t)9128416073728948653U, (uint64_t)11233051033546138578U, (uint64_t)6769345682610122833U,
-    (uint64_t)10823233224575054288U, (uint64_t)9997725227559980175U, (uint64_t)6733425642852897415U,
-    (uint64_t)16302206918151466066U, (uint64_t)1669330822143265921U, (uint64_t)2661645605036546002U,
-    (uint64_t)17182558479745802165U, (uint64_t)1165082692376932040U, (uint64_t)9470595929011488359U,
-    (uint64_t)6142147329285324932U, (uint64_t)4829075085998111287U, (uint64_t)10231370681107338930U,
-    (uint64_t)9591876895322495239U, (uint64_t)10316468561384076618U,
-    (uint64_t)11592503647238064235U, (uint64_t)13395813606055179632U, (uint64_t)511127033980815508U,
-    (uint64_t)12434976573147649880U, (uint64_t)3425094795384359127U, (uint64_t)6816971736303023445U,
-    (uint64_t)15444670609021139344U, (uint64_t)9464349818322082360U,
-    (uint64_t)16178216413042376883U, (uint64_t)9595540370774317348U, (uint64_t)7229365182662875710U,
-    (uint64_t)4601177649460012843U, (uint64_t)5455046447382487090U, (uint64_t)10854066421606187521U,
-    (uint64_t)15913416821879788071U, (uint64_t)2297365362023460173U, (uint64_t)2603252216454941350U,
-    (uint64_t)6768791943870490934U, (uint64_t)15705936687122754810U, (uint64_t)9537096567546600694U,
-    (uint64_t)17580538144855035062U, (uint64_t)4496542856965746638U, (uint64_t)8444341625922124942U,
-    (uint64_t)12191263903636183168U, (uint64_t)17427332907535974165U,
-    (uint64_t)14307569739254103736U, (uint64_t)13900598742063266169U,
-    (uint64_t)7176996424355977650U, (uint64_t)5709008170379717479U, (uint64_t)14471312052264549092U,
-    (uint64_t)1464519909491759867U, (uint64_t)3328154641049602121U, (uint64_t)13020349337171136774U,
-    (uint64_t)2772166279972051938U, (uint64_t)10854476939425975292U, (uint64_t)1967189930534630940U,
-    (uint64_t)2802919076529341959U, (uint64_t)14792226094833519208U,
-    (uint64_t)14675640928566522177U, (uint64_t)14838974364643800837U,
-    (uint64_t)17631460696099549980U, (uint64_t)17434186275364935469U,
-    (uint64_t)2665648200587705473U, (uint64_t)13202122464492564051U, (uint64_t)7576287350918073341U,
-    (uint64_t)2272206013910186424U, (uint64_t)14558761641743937843U, (uint64_t)5675729149929979729U,
-    (uint64_t)9043135187561613166U, (uint64_t)11750149293830589225U, (uint64_t)740555197954307911U,
-    (uint64_t)9871738005087190699U, (uint64_t)17178667634283502053U,
-    (uint64_t)18046255991533013265U, (uint64_t)4458222096988430430U, (uint64_t)8452427758526311627U,
-    (uint64_t)13825286929656615266U, (uint64_t)13956286357198391218U,
-    (uint64_t)15875692916799995079U, (uint64_t)10634895319157013920U,
-    (uint64_t)13230116118036304207U, (uint64_t)8795317393614625606U, (uint64_t)7001710806858862020U,
-    (uint64_t)7949746088586183478U, (uint64_t)14677556044923602317U,
-    (uint64_t)11184023437485843904U, (uint64_t)11215864722023085094U,
-    (uint64_t)6444464081471519014U, (uint64_t)1706241174022415217U, (uint64_t)8243975633057550613U,
-    (uint64_t)15502902453836085864U, (uint64_t)3799182188594003953U, (uint64_t)3538840175098724094U,
-    (uint64_t)13240193491554624643U, (uint64_t)12365034249541329920U,
-    (uint64_t)2924326828590977357U, (uint64_t)5687195797140589099U, (uint64_t)16880427227292834531U,
-    (uint64_t)9691471435758991112U, (uint64_t)16642385273732487288U,
-    (uint64_t)12173806747523009914U, (uint64_t)13142722756877876849U,
-    (uint64_t)8370377548305121979U, (uint64_t)17988526053752025426U, (uint64_t)4818750752684100334U,
-    (uint64_t)5669241919350361655U, (uint64_t)4964810303238518540U, (uint64_t)16709712747671533191U,
-    (uint64_t)4461414404267448242U, (uint64_t)3971798785139504238U, (uint64_t)6276818948740422136U,
-    (uint64_t)1426735892164275762U, (uint64_t)7943622674892418919U, (uint64_t)9864274225563929680U,
-    (uint64_t)57815533745003233U, (uint64_t)10893588105168960233U, (uint64_t)15739162732907069535U,
-    (uint64_t)3923866849462073470U, (uint64_t)12279826158399226875U, (uint64_t)1533015761334846582U,
-    (uint64_t)15860156818568437510U, (uint64_t)8252625373831297988U, (uint64_t)9666953804812706358U,
-    (uint64_t)8767785238646914634U, (uint64_t)14382179044941403551U,
-    (uint64_t)10401039907264254245U, (uint64_t)8584860003763157350U, (uint64_t)3120462679504470266U,
-    (uint64_t)8670255778748340069U, (uint64_t)5313789577940369984U, (uint64_t)16977072364454789224U,
-    (uint64_t)12199578693972188324U, (uint64_t)18211098771672599237U,
-    (uint64_t)12868831556008795030U, (uint64_t)5310155061431048194U,
-    (uint64_t)18114153238435112606U, (uint64_t)14482365809278304512U,
-    (uint64_t)12520721662723001511U, (uint64_t)405943624021143002U, (uint64_t)8146944101507657423U,
-    (uint64_t)181739317780393495U, (uint64_t)81743892273670099U, (uint64_t)14759561962550473930U,
-    (uint64_t)4592623849546992939U, (uint64_t)6916440441743449719U, (uint64_t)1304610503530809833U,
-    (uint64_t)5464930909232486441U, (uint64_t)15414883617496224671U, (uint64_t)8129283345256790U,
-    (uint64_t)18294252198413739489U, (uint64_t)17394115281884857288U,
-    (uint64_t)7808348415224731235U, (uint64_t)13195566655747230608U, (uint64_t)8568194219353949094U,
-    (uint64_t)15329813048672122440U, (uint64_t)9604275495885785744U, (uint64_t)1577712551205219835U,
-    (uint64_t)15964209008022052790U, (uint64_t)15087297920782098160U,
-    (uint64_t)3946031512438511898U, (uint64_t)10050061168984440631U,
-    (uint64_t)11382452014533138316U, (uint64_t)6313670788911952792U,
-    (uint64_t)12015989229696164014U, (uint64_t)5946702628076168852U, (uint64_t)5219995658774362841U,
-    (uint64_t)12230141881068377972U, (uint64_t)12361195202673441956U,
-    (uint64_t)4732862275653856711U, (uint64_t)17221430380805252370U,
-    (uint64_t)15397525953897375810U, (uint64_t)16557437297239563045U,
-    (uint64_t)10101683801868971351U, (uint64_t)1402611372245592868U, (uint64_t)1931806383735563658U,
-    (uint64_t)10991705207471512479U, (uint64_t)861333583207471392U, (uint64_t)15207766844626322355U,
-    (uint64_t)9224628129811432393U, (uint64_t)3497069567089055613U, (uint64_t)11956632757898590316U,
-    (uint64_t)8733729372586312960U, (uint64_t)18091521051714930927U, (uint64_t)77582787724373283U,
-    (uint64_t)9922437373519669237U, (uint64_t)3079321456325704615U, (uint64_t)12171198408512478457U,
-    (uint64_t)17179130884012147596U, (uint64_t)6839115479620367181U, (uint64_t)4421032569964105406U,
-    (uint64_t)10353331468657256053U, (uint64_t)17400988720335968824U,
-    (uint64_t)17138855889417480540U, (uint64_t)4507980080381370611U,
-    (uint64_t)10703175719793781886U, (uint64_t)12598516658725890426U,
-    (uint64_t)8353463412173898932U, (uint64_t)17703029389228422404U, (uint64_t)9313111267107226233U,
-    (uint64_t)5441322942995154196U, (uint64_t)8952817660034465484U, (uint64_t)17571113341183703118U,
-    (uint64_t)7375087953801067019U, (uint64_t)13381466302076453648U, (uint64_t)3218165271423914596U,
-    (uint64_t)16956372157249382685U, (uint64_t)509080090049418841U, (uint64_t)13374233893294084913U,
-    (uint64_t)2988537624204297086U, (uint64_t)4979195832939384620U, (uint64_t)3803931594068976394U,
-    (uint64_t)10731535883829627646U, (uint64_t)12954845047607194278U,
-    (uint64_t)10494298062560667399U, (uint64_t)4967351022190213065U,
-    (uint64_t)13391917938145756456U, (uint64_t)951370484866918160U, (uint64_t)13531334179067685307U,
-    (uint64_t)12868421357919390599U, (uint64_t)15918857042998130258U,
-    (uint64_t)17769743831936974016U, (uint64_t)7137921979260368809U,
-    (uint64_t)12461369180685892062U, (uint64_t)827476514081935199U, (uint64_t)15107282134224767230U,
-    (uint64_t)10084765752802805748U, (uint64_t)3303739059392464407U,
-    (uint64_t)17859532612136591428U, (uint64_t)10949414770405040164U,
-    (uint64_t)12838613589371008785U, (uint64_t)5554397169231540728U,
-    (uint64_t)18375114572169624408U, (uint64_t)15649286703242390139U,
-    (uint64_t)2957281557463706877U, (uint64_t)14000350446219393213U,
-    (uint64_t)14355199721749620351U, (uint64_t)2730856240099299695U,
-    (uint64_t)17528131000714705752U, (uint64_t)2537498525883536360U, (uint64_t)6121058967084509393U,
-    (uint64_t)16897667060435514221U, (uint64_t)12367869599571112440U,
-    (uint64_t)3388831797050807508U, (uint64_t)16791449724090982798U, (uint64_t)2673426123453294928U,
-    (uint64_t)11369313542384405846U, (uint64_t)15641960333586432634U,
-    (uint64_t)15080962589658958379U, (uint64_t)7747943772340226569U, (uint64_t)8075023376199159152U,
-    (uint64_t)8485093027378306528U, (uint64_t)13503706844122243648U, (uint64_t)8401961362938086226U,
-    (uint64_t)8125426002124226402U, (uint64_t)9005399361407785203U, (uint64_t)6847968030066906634U,
-    (uint64_t)11934937736309295197U, (uint64_t)5116750888594772351U, (uint64_t)2817039227179245227U,
-    (uint64_t)17724206901239332980U, (uint64_t)4985702708254058578U, (uint64_t)5786345435756642871U,
-    (uint64_t)17772527414940936938U, (uint64_t)1201320251272957006U,
-    (uint64_t)15787430120324348129U, (uint64_t)6305488781359965661U,
-    (uint64_t)12423900845502858433U, (uint64_t)17485949424202277720U,
-    (uint64_t)2062237315546855852U, (uint64_t)10353639467860902375U, (uint64_t)2315398490451287299U,
-    (uint64_t)15394572894814882621U, (uint64_t)232866113801165640U, (uint64_t)7413443736109338926U,
-    (uint64_t)902719806551551191U, (uint64_t)16568853118619045174U, (uint64_t)14202214862428279177U,
-    (uint64_t)11719595395278861192U, (uint64_t)5890053236389907647U, (uint64_t)9996196494965833627U,
-    (uint64_t)12967056942364782577U, (uint64_t)9034128755157395787U,
-    (uint64_t)17898204904710512655U, (uint64_t)8229373445062993977U,
-    (uint64_t)13580036169519833644U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 8784043285714375740ULL, 8483257759279461889ULL, 8789745728267363600ULL,
+    1770019616739251654ULL, 15992936863339206154ULL, 10037038012062884956ULL,
+    15197544864945402661ULL, 9615747158586711429ULL, 1ULL, 18446744069414584320ULL,
+    18446744073709551615ULL, 4294967294ULL, 10634854829044225757ULL, 351552716085025155ULL,
+    10645315080955407736ULL, 3609262091244858135ULL, 15760741698986874125ULL,
+    14936374388219697827ULL, 15751360096993017895ULL, 18012233706239762398ULL,
+    1993877568177495041ULL, 10345888787846536528ULL, 7746511691117935375ULL,
+    14517043990409914413ULL, 14122549297570634151ULL, 16934610359517083771ULL,
+    5724511325497097418ULL, 8983432969107448705ULL, 2687429970334080245ULL, 16525396802810050288ULL,
+    7602596488871585854ULL, 4813919589149203084ULL, 7680395813780804519ULL, 6687709583048023590ULL,
+    18086445169104142027ULL, 9637814708330203929ULL, 14785108459960679090ULL,
+    3838023279095023581ULL, 3555615526157830307ULL, 5177066488380472871ULL, 18218186719108038403ULL,
+    16281556341699656105ULL, 1524227924561461191ULL, 4148060517641909597ULL, 2858290374115363433ULL,
+    8942772026334130620ULL, 3034451298319885113ULL, 8447866036736640940ULL, 11204933433076256578ULL,
+    18333595740249588297ULL, 8259597024804538246ULL, 9539734295777539786ULL, 9797290423046626413ULL,
+    5777303437849646537ULL, 8739356909899132020ULL, 14815960973766782158ULL,
+    15286581798204509801ULL, 17597362577777019682ULL, 13259283710820519742ULL,
+    10501322996899164670ULL, 1221138904338319642ULL, 14586685489551951885ULL, 895326705426031212ULL,
+    14398171728560617847ULL, 9592550823745097391ULL, 17240998489162206026ULL,
+    8085479283308189196ULL, 14844657737893882826ULL, 15923425394150618234ULL,
+    2997808084773249525ULL, 494323555453660587ULL, 1215695327517794764ULL, 9476207381098391690ULL,
+    7480789678419122995ULL, 15212230329321082489ULL, 436189395349576388ULL, 17377474396456660834ULL,
+    15237013929655017939ULL, 11444428846883781676ULL, 5112749694521428575ULL, 950829367509872073ULL,
+    17665036182057559519ULL, 17205133339690002313ULL, 16233765170251334549ULL,
+    10122775683257972591ULL, 3352514236455632420ULL, 9143148522359954691ULL, 601191684005658860ULL,
+    13398772186646349998ULL, 15512696600132928431ULL, 9128416073728948653ULL,
+    11233051033546138578ULL, 6769345682610122833ULL, 10823233224575054288ULL,
+    9997725227559980175ULL, 6733425642852897415ULL, 16302206918151466066ULL, 1669330822143265921ULL,
+    2661645605036546002ULL, 17182558479745802165ULL, 1165082692376932040ULL, 9470595929011488359ULL,
+    6142147329285324932ULL, 4829075085998111287ULL, 10231370681107338930ULL, 9591876895322495239ULL,
+    10316468561384076618ULL, 11592503647238064235ULL, 13395813606055179632ULL,
+    511127033980815508ULL, 12434976573147649880ULL, 3425094795384359127ULL, 6816971736303023445ULL,
+    15444670609021139344ULL, 9464349818322082360ULL, 16178216413042376883ULL,
+    9595540370774317348ULL, 7229365182662875710ULL, 4601177649460012843ULL, 5455046447382487090ULL,
+    10854066421606187521ULL, 15913416821879788071ULL, 2297365362023460173ULL,
+    2603252216454941350ULL, 6768791943870490934ULL, 15705936687122754810ULL, 9537096567546600694ULL,
+    17580538144855035062ULL, 4496542856965746638ULL, 8444341625922124942ULL,
+    12191263903636183168ULL, 17427332907535974165ULL, 14307569739254103736ULL,
+    13900598742063266169ULL, 7176996424355977650ULL, 5709008170379717479ULL,
+    14471312052264549092ULL, 1464519909491759867ULL, 3328154641049602121ULL,
+    13020349337171136774ULL, 2772166279972051938ULL, 10854476939425975292ULL,
+    1967189930534630940ULL, 2802919076529341959ULL, 14792226094833519208ULL,
+    14675640928566522177ULL, 14838974364643800837ULL, 17631460696099549980ULL,
+    17434186275364935469ULL, 2665648200587705473ULL, 13202122464492564051ULL,
+    7576287350918073341ULL, 2272206013910186424ULL, 14558761641743937843ULL, 5675729149929979729ULL,
+    9043135187561613166ULL, 11750149293830589225ULL, 740555197954307911ULL, 9871738005087190699ULL,
+    17178667634283502053ULL, 18046255991533013265ULL, 4458222096988430430ULL,
+    8452427758526311627ULL, 13825286929656615266ULL, 13956286357198391218ULL,
+    15875692916799995079ULL, 10634895319157013920ULL, 13230116118036304207ULL,
+    8795317393614625606ULL, 7001710806858862020ULL, 7949746088586183478ULL, 14677556044923602317ULL,
+    11184023437485843904ULL, 11215864722023085094ULL, 6444464081471519014ULL,
+    1706241174022415217ULL, 8243975633057550613ULL, 15502902453836085864ULL, 3799182188594003953ULL,
+    3538840175098724094ULL, 13240193491554624643ULL, 12365034249541329920ULL,
+    2924326828590977357ULL, 5687195797140589099ULL, 16880427227292834531ULL, 9691471435758991112ULL,
+    16642385273732487288ULL, 12173806747523009914ULL, 13142722756877876849ULL,
+    8370377548305121979ULL, 17988526053752025426ULL, 4818750752684100334ULL, 5669241919350361655ULL,
+    4964810303238518540ULL, 16709712747671533191ULL, 4461414404267448242ULL, 3971798785139504238ULL,
+    6276818948740422136ULL, 1426735892164275762ULL, 7943622674892418919ULL, 9864274225563929680ULL,
+    57815533745003233ULL, 10893588105168960233ULL, 15739162732907069535ULL, 3923866849462073470ULL,
+    12279826158399226875ULL, 1533015761334846582ULL, 15860156818568437510ULL,
+    8252625373831297988ULL, 9666953804812706358ULL, 8767785238646914634ULL, 14382179044941403551ULL,
+    10401039907264254245ULL, 8584860003763157350ULL, 3120462679504470266ULL, 8670255778748340069ULL,
+    5313789577940369984ULL, 16977072364454789224ULL, 12199578693972188324ULL,
+    18211098771672599237ULL, 12868831556008795030ULL, 5310155061431048194ULL,
+    18114153238435112606ULL, 14482365809278304512ULL, 12520721662723001511ULL,
+    405943624021143002ULL, 8146944101507657423ULL, 181739317780393495ULL, 81743892273670099ULL,
+    14759561962550473930ULL, 4592623849546992939ULL, 6916440441743449719ULL, 1304610503530809833ULL,
+    5464930909232486441ULL, 15414883617496224671ULL, 8129283345256790ULL, 18294252198413739489ULL,
+    17394115281884857288ULL, 7808348415224731235ULL, 13195566655747230608ULL,
+    8568194219353949094ULL, 15329813048672122440ULL, 9604275495885785744ULL, 1577712551205219835ULL,
+    15964209008022052790ULL, 15087297920782098160ULL, 3946031512438511898ULL,
+    10050061168984440631ULL, 11382452014533138316ULL, 6313670788911952792ULL,
+    12015989229696164014ULL, 5946702628076168852ULL, 5219995658774362841ULL,
+    12230141881068377972ULL, 12361195202673441956ULL, 4732862275653856711ULL,
+    17221430380805252370ULL, 15397525953897375810ULL, 16557437297239563045ULL,
+    10101683801868971351ULL, 1402611372245592868ULL, 1931806383735563658ULL,
+    10991705207471512479ULL, 861333583207471392ULL, 15207766844626322355ULL, 9224628129811432393ULL,
+    3497069567089055613ULL, 11956632757898590316ULL, 8733729372586312960ULL,
+    18091521051714930927ULL, 77582787724373283ULL, 9922437373519669237ULL, 3079321456325704615ULL,
+    12171198408512478457ULL, 17179130884012147596ULL, 6839115479620367181ULL,
+    4421032569964105406ULL, 10353331468657256053ULL, 17400988720335968824ULL,
+    17138855889417480540ULL, 4507980080381370611ULL, 10703175719793781886ULL,
+    12598516658725890426ULL, 8353463412173898932ULL, 17703029389228422404ULL,
+    9313111267107226233ULL, 5441322942995154196ULL, 8952817660034465484ULL, 17571113341183703118ULL,
+    7375087953801067019ULL, 13381466302076453648ULL, 3218165271423914596ULL,
+    16956372157249382685ULL, 509080090049418841ULL, 13374233893294084913ULL, 2988537624204297086ULL,
+    4979195832939384620ULL, 3803931594068976394ULL, 10731535883829627646ULL,
+    12954845047607194278ULL, 10494298062560667399ULL, 4967351022190213065ULL,
+    13391917938145756456ULL, 951370484866918160ULL, 13531334179067685307ULL,
+    12868421357919390599ULL, 15918857042998130258ULL, 17769743831936974016ULL,
+    7137921979260368809ULL, 12461369180685892062ULL, 827476514081935199ULL, 15107282134224767230ULL,
+    10084765752802805748ULL, 3303739059392464407ULL, 17859532612136591428ULL,
+    10949414770405040164ULL, 12838613589371008785ULL, 5554397169231540728ULL,
+    18375114572169624408ULL, 15649286703242390139ULL, 2957281557463706877ULL,
+    14000350446219393213ULL, 14355199721749620351ULL, 2730856240099299695ULL,
+    17528131000714705752ULL, 2537498525883536360ULL, 6121058967084509393ULL,
+    16897667060435514221ULL, 12367869599571112440ULL, 3388831797050807508ULL,
+    16791449724090982798ULL, 2673426123453294928ULL, 11369313542384405846ULL,
+    15641960333586432634ULL, 15080962589658958379ULL, 7747943772340226569ULL,
+    8075023376199159152ULL, 8485093027378306528ULL, 13503706844122243648ULL, 8401961362938086226ULL,
+    8125426002124226402ULL, 9005399361407785203ULL, 6847968030066906634ULL, 11934937736309295197ULL,
+    5116750888594772351ULL, 2817039227179245227ULL, 17724206901239332980ULL, 4985702708254058578ULL,
+    5786345435756642871ULL, 17772527414940936938ULL, 1201320251272957006ULL,
+    15787430120324348129ULL, 6305488781359965661ULL, 12423900845502858433ULL,
+    17485949424202277720ULL, 2062237315546855852ULL, 10353639467860902375ULL,
+    2315398490451287299ULL, 15394572894814882621ULL, 232866113801165640ULL, 7413443736109338926ULL,
+    902719806551551191ULL, 16568853118619045174ULL, 14202214862428279177ULL,
+    11719595395278861192ULL, 5890053236389907647ULL, 9996196494965833627ULL,
+    12967056942364782577ULL, 9034128755157395787ULL, 17898204904710512655ULL,
+    8229373445062993977ULL, 13580036169519833644ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/msvc/EverCrypt_Hash.h b/include/msvc/EverCrypt_Hash.h
index 6791dc27..431b1375 100644
--- a/include/msvc/EverCrypt_Hash.h
+++ b/include/msvc/EverCrypt_Hash.h
@@ -121,29 +121,29 @@ EverCrypt_Hash_Incremental_hash(
   uint32_t len
 );
 
-#define MD5_HASH_LEN ((uint32_t)16U)
+#define MD5_HASH_LEN (16U)
 
-#define SHA1_HASH_LEN ((uint32_t)20U)
+#define SHA1_HASH_LEN (20U)
 
-#define SHA2_224_HASH_LEN ((uint32_t)28U)
+#define SHA2_224_HASH_LEN (28U)
 
-#define SHA2_256_HASH_LEN ((uint32_t)32U)
+#define SHA2_256_HASH_LEN (32U)
 
-#define SHA2_384_HASH_LEN ((uint32_t)48U)
+#define SHA2_384_HASH_LEN (48U)
 
-#define SHA2_512_HASH_LEN ((uint32_t)64U)
+#define SHA2_512_HASH_LEN (64U)
 
-#define SHA3_224_HASH_LEN ((uint32_t)28U)
+#define SHA3_224_HASH_LEN (28U)
 
-#define SHA3_256_HASH_LEN ((uint32_t)32U)
+#define SHA3_256_HASH_LEN (32U)
 
-#define SHA3_384_HASH_LEN ((uint32_t)48U)
+#define SHA3_384_HASH_LEN (48U)
 
-#define SHA3_512_HASH_LEN ((uint32_t)64U)
+#define SHA3_512_HASH_LEN (64U)
 
-#define BLAKE2S_HASH_LEN ((uint32_t)32U)
+#define BLAKE2S_HASH_LEN (32U)
 
-#define BLAKE2B_HASH_LEN ((uint32_t)64U)
+#define BLAKE2B_HASH_LEN (64U)
 
 #if defined(__cplusplus)
 }
diff --git a/include/msvc/Hacl_IntTypes_Intrinsics.h b/include/msvc/Hacl_IntTypes_Intrinsics.h
index e2a193e9..c816b046 100644
--- a/include/msvc/Hacl_IntTypes_Intrinsics.h
+++ b/include/msvc/Hacl_IntTypes_Intrinsics.h
@@ -41,7 +41,7 @@ static inline uint32_t
 Hacl_IntTypes_Intrinsics_add_carry_u32(uint32_t cin, uint32_t x, uint32_t y, uint32_t *r)
 {
   uint64_t res = (uint64_t)x + (uint64_t)cin + (uint64_t)y;
-  uint32_t c = (uint32_t)(res >> (uint32_t)32U);
+  uint32_t c = (uint32_t)(res >> 32U);
   r[0U] = (uint32_t)res;
   return c;
 }
@@ -50,7 +50,7 @@ static inline uint32_t
 Hacl_IntTypes_Intrinsics_sub_borrow_u32(uint32_t cin, uint32_t x, uint32_t y, uint32_t *r)
 {
   uint64_t res = (uint64_t)x - (uint64_t)y - (uint64_t)cin;
-  uint32_t c = (uint32_t)(res >> (uint32_t)32U) & (uint32_t)1U;
+  uint32_t c = (uint32_t)(res >> 32U) & 1U;
   r[0U] = (uint32_t)res;
   return c;
 }
@@ -59,8 +59,7 @@ static inline uint64_t
 Hacl_IntTypes_Intrinsics_add_carry_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r)
 {
   uint64_t res = x + cin + y;
-  uint64_t
-  c = (~FStar_UInt64_gte_mask(res, x) | (FStar_UInt64_eq_mask(res, x) & cin)) & (uint64_t)1U;
+  uint64_t c = (~FStar_UInt64_gte_mask(res, x) | (FStar_UInt64_eq_mask(res, x) & cin)) & 1ULL;
   r[0U] = res;
   return c;
 }
@@ -73,7 +72,7 @@ Hacl_IntTypes_Intrinsics_sub_borrow_u64(uint64_t cin, uint64_t x, uint64_t y, ui
   c =
     ((FStar_UInt64_gte_mask(res, x) & ~FStar_UInt64_eq_mask(res, x))
     | (FStar_UInt64_eq_mask(res, x) & cin))
-    & (uint64_t)1U;
+    & 1ULL;
   r[0U] = res;
   return c;
 }
diff --git a/include/msvc/Hacl_IntTypes_Intrinsics_128.h b/include/msvc/Hacl_IntTypes_Intrinsics_128.h
index aa843a6c..d3008969 100644
--- a/include/msvc/Hacl_IntTypes_Intrinsics_128.h
+++ b/include/msvc/Hacl_IntTypes_Intrinsics_128.h
@@ -45,7 +45,7 @@ Hacl_IntTypes_Intrinsics_128_add_carry_u64(uint64_t cin, uint64_t x, uint64_t y,
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_uint64_to_uint128(x),
         FStar_UInt128_uint64_to_uint128(cin)),
       FStar_UInt128_uint64_to_uint128(y));
-  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
+  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U));
   r[0U] = FStar_UInt128_uint128_to_uint64(res);
   return c;
 }
@@ -58,10 +58,7 @@ Hacl_IntTypes_Intrinsics_128_sub_borrow_u64(uint64_t cin, uint64_t x, uint64_t y
     FStar_UInt128_sub_mod(FStar_UInt128_sub_mod(FStar_UInt128_uint64_to_uint128(x),
         FStar_UInt128_uint64_to_uint128(y)),
       FStar_UInt128_uint64_to_uint128(cin));
-  uint64_t
-  c =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U))
-    & (uint64_t)1U;
+  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U)) & 1ULL;
   r[0U] = FStar_UInt128_uint128_to_uint64(res);
   return c;
 }
diff --git a/include/msvc/internal/Hacl_Bignum.h b/include/msvc/internal/Hacl_Bignum.h
index 901a8dad..4b31236d 100644
--- a/include/msvc/internal/Hacl_Bignum.h
+++ b/include/msvc/internal/Hacl_Bignum.h
@@ -124,15 +124,6 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(
   uint32_t *res
 );
 
-void
-Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
-  uint32_t len,
-  uint32_t *n,
-  uint32_t nInv,
-  uint32_t *c,
-  uint32_t *res
-);
-
 void
 Hacl_Bignum_Montgomery_bn_to_mont_u32(
   uint32_t len,
@@ -181,15 +172,6 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(
   uint64_t *res
 );
 
-void
-Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
-  uint32_t len,
-  uint64_t *n,
-  uint64_t nInv,
-  uint64_t *c,
-  uint64_t *res
-);
-
 void
 Hacl_Bignum_Montgomery_bn_to_mont_u64(
   uint32_t len,
@@ -228,6 +210,24 @@ Hacl_Bignum_Montgomery_bn_mont_sqr_u64(
   uint64_t *resM
 );
 
+void
+Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32(
+  uint32_t len,
+  uint32_t *n,
+  uint32_t nInv,
+  uint32_t *c,
+  uint32_t *res
+);
+
+void
+Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64(
+  uint32_t len,
+  uint64_t *n,
+  uint64_t nInv,
+  uint64_t *c,
+  uint64_t *res
+);
+
 uint32_t
 Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(
   uint32_t len,
diff --git a/include/msvc/internal/Hacl_Bignum25519_51.h b/include/msvc/internal/Hacl_Bignum25519_51.h
index 25a10503..4678f8a0 100644
--- a/include/msvc/internal/Hacl_Bignum25519_51.h
+++ b/include/msvc/internal/Hacl_Bignum25519_51.h
@@ -69,11 +69,11 @@ static inline void Hacl_Impl_Curve25519_Field51_fsub(uint64_t *out, uint64_t *f1
   uint64_t f23 = f2[3U];
   uint64_t f14 = f1[4U];
   uint64_t f24 = f2[4U];
-  out[0U] = f10 + (uint64_t)0x3fffffffffff68U - f20;
-  out[1U] = f11 + (uint64_t)0x3ffffffffffff8U - f21;
-  out[2U] = f12 + (uint64_t)0x3ffffffffffff8U - f22;
-  out[3U] = f13 + (uint64_t)0x3ffffffffffff8U - f23;
-  out[4U] = f14 + (uint64_t)0x3ffffffffffff8U - f24;
+  out[0U] = f10 + 0x3fffffffffff68ULL - f20;
+  out[1U] = f11 + 0x3ffffffffffff8ULL - f21;
+  out[2U] = f12 + 0x3ffffffffffff8ULL - f22;
+  out[3U] = f13 + 0x3ffffffffffff8ULL - f23;
+  out[4U] = f14 + 0x3ffffffffffff8ULL - f24;
 }
 
 static inline void
@@ -84,7 +84,7 @@ Hacl_Impl_Curve25519_Field51_fmul(
   FStar_UInt128_uint128 *uu___
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f10 = f1[0U];
   uint64_t f11 = f1[1U];
   uint64_t f12 = f1[2U];
@@ -95,10 +95,10 @@ Hacl_Impl_Curve25519_Field51_fmul(
   uint64_t f22 = f2[2U];
   uint64_t f23 = f2[3U];
   uint64_t f24 = f2[4U];
-  uint64_t tmp1 = f21 * (uint64_t)19U;
-  uint64_t tmp2 = f22 * (uint64_t)19U;
-  uint64_t tmp3 = f23 * (uint64_t)19U;
-  uint64_t tmp4 = f24 * (uint64_t)19U;
+  uint64_t tmp1 = f21 * 19ULL;
+  uint64_t tmp2 = f22 * 19ULL;
+  uint64_t tmp3 = f23 * 19ULL;
+  uint64_t tmp4 = f24 * 19ULL;
   FStar_UInt128_uint128 o00 = FStar_UInt128_mul_wide(f10, f20);
   FStar_UInt128_uint128 o10 = FStar_UInt128_mul_wide(f10, f21);
   FStar_UInt128_uint128 o20 = FStar_UInt128_mul_wide(f10, f22);
@@ -129,25 +129,24 @@ Hacl_Impl_Curve25519_Field51_fmul(
   FStar_UInt128_uint128 tmp_w2 = o24;
   FStar_UInt128_uint128 tmp_w3 = o34;
   FStar_UInt128_uint128 tmp_w4 = o44;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp01 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp01 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w1, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp11 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp11 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w2, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp21 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp21 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w3, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp31 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp31 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w4, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp41 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp01 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp41 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp01 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t o0 = tmp0_;
   uint64_t o1 = tmp11 + c5;
   uint64_t o2 = tmp21;
@@ -168,7 +167,7 @@ Hacl_Impl_Curve25519_Field51_fmul2(
   FStar_UInt128_uint128 *uu___
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f10 = f1[0U];
   uint64_t f11 = f1[1U];
   uint64_t f12 = f1[2U];
@@ -189,14 +188,14 @@ Hacl_Impl_Curve25519_Field51_fmul2(
   uint64_t f42 = f2[7U];
   uint64_t f43 = f2[8U];
   uint64_t f44 = f2[9U];
-  uint64_t tmp11 = f21 * (uint64_t)19U;
-  uint64_t tmp12 = f22 * (uint64_t)19U;
-  uint64_t tmp13 = f23 * (uint64_t)19U;
-  uint64_t tmp14 = f24 * (uint64_t)19U;
-  uint64_t tmp21 = f41 * (uint64_t)19U;
-  uint64_t tmp22 = f42 * (uint64_t)19U;
-  uint64_t tmp23 = f43 * (uint64_t)19U;
-  uint64_t tmp24 = f44 * (uint64_t)19U;
+  uint64_t tmp11 = f21 * 19ULL;
+  uint64_t tmp12 = f22 * 19ULL;
+  uint64_t tmp13 = f23 * 19ULL;
+  uint64_t tmp14 = f24 * 19ULL;
+  uint64_t tmp21 = f41 * 19ULL;
+  uint64_t tmp22 = f42 * 19ULL;
+  uint64_t tmp23 = f43 * 19ULL;
+  uint64_t tmp24 = f44 * 19ULL;
   FStar_UInt128_uint128 o00 = FStar_UInt128_mul_wide(f10, f20);
   FStar_UInt128_uint128 o15 = FStar_UInt128_mul_wide(f10, f21);
   FStar_UInt128_uint128 o25 = FStar_UInt128_mul_wide(f10, f22);
@@ -257,49 +256,47 @@ Hacl_Impl_Curve25519_Field51_fmul2(
   FStar_UInt128_uint128 tmp_w22 = o241;
   FStar_UInt128_uint128 tmp_w23 = o34;
   FStar_UInt128_uint128 tmp_w24 = o44;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(tmp_w10, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(tmp_w10, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w11, FStar_UInt128_uint64_to_uint128(c00));
-  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w12, FStar_UInt128_uint64_to_uint128(c10));
-  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w13, FStar_UInt128_uint64_to_uint128(c20));
-  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w14, FStar_UInt128_uint64_to_uint128(c30));
-  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp00 + c40 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c50 = l_4 >> (uint32_t)51U;
+  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp00 + c40 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c50 = l_4 >> 51U;
   uint64_t o100 = tmp0_;
   uint64_t o112 = tmp10 + c50;
   uint64_t o122 = tmp20;
   uint64_t o132 = tmp30;
   uint64_t o142 = tmp40;
-  FStar_UInt128_uint128
-  l_5 = FStar_UInt128_add(tmp_w20, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, (uint32_t)51U));
+  FStar_UInt128_uint128 l_5 = FStar_UInt128_add(tmp_w20, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, 51U));
   FStar_UInt128_uint128 l_6 = FStar_UInt128_add(tmp_w21, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, 51U));
   FStar_UInt128_uint128 l_7 = FStar_UInt128_add(tmp_w22, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, 51U));
   FStar_UInt128_uint128 l_8 = FStar_UInt128_add(tmp_w23, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, 51U));
   FStar_UInt128_uint128 l_9 = FStar_UInt128_add(tmp_w24, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, (uint32_t)51U));
-  uint64_t l_10 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_0 = l_10 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_10 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, 51U));
+  uint64_t l_10 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_0 = l_10 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_10 >> 51U;
   uint64_t o200 = tmp0_0;
   uint64_t o212 = tmp1 + c5;
   uint64_t o222 = tmp2;
@@ -339,25 +336,24 @@ static inline void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f
   FStar_UInt128_uint128 tmp_w2 = FStar_UInt128_mul_wide(f2, f12);
   FStar_UInt128_uint128 tmp_w3 = FStar_UInt128_mul_wide(f2, f13);
   FStar_UInt128_uint128 tmp_w4 = FStar_UInt128_mul_wide(f2, f14);
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w1, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w2, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w3, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w4, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t o0 = tmp0_;
   uint64_t o1 = tmp1 + c5;
   uint64_t o2 = tmp2;
@@ -373,18 +369,18 @@ static inline void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f
 static inline void
 Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___)
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f0 = f[0U];
   uint64_t f1 = f[1U];
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  uint64_t d0 = (uint64_t)2U * f0;
-  uint64_t d1 = (uint64_t)2U * f1;
-  uint64_t d2 = (uint64_t)38U * f2;
-  uint64_t d3 = (uint64_t)19U * f3;
-  uint64_t d419 = (uint64_t)19U * f4;
-  uint64_t d4 = (uint64_t)2U * d419;
+  uint64_t d0 = 2ULL * f0;
+  uint64_t d1 = 2ULL * f1;
+  uint64_t d2 = 38ULL * f2;
+  uint64_t d3 = 19ULL * f3;
+  uint64_t d419 = 19ULL * f4;
+  uint64_t d4 = 2ULL * d419;
   FStar_UInt128_uint128
   s0 =
     FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f0, f0),
@@ -415,25 +411,24 @@ Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint
   FStar_UInt128_uint128 o20 = s2;
   FStar_UInt128_uint128 o30 = s3;
   FStar_UInt128_uint128 o40 = s4;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(o00, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(o00, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(o10, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(o20, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(o30, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(o40, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t o0 = tmp0_;
   uint64_t o1 = tmp1 + c5;
   uint64_t o2 = tmp2;
@@ -449,7 +444,7 @@ Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint
 static inline void
 Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___)
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f10 = f[0U];
   uint64_t f11 = f[1U];
   uint64_t f12 = f[2U];
@@ -460,12 +455,12 @@ Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uin
   uint64_t f22 = f[7U];
   uint64_t f23 = f[8U];
   uint64_t f24 = f[9U];
-  uint64_t d00 = (uint64_t)2U * f10;
-  uint64_t d10 = (uint64_t)2U * f11;
-  uint64_t d20 = (uint64_t)38U * f12;
-  uint64_t d30 = (uint64_t)19U * f13;
-  uint64_t d4190 = (uint64_t)19U * f14;
-  uint64_t d40 = (uint64_t)2U * d4190;
+  uint64_t d00 = 2ULL * f10;
+  uint64_t d10 = 2ULL * f11;
+  uint64_t d20 = 38ULL * f12;
+  uint64_t d30 = 19ULL * f13;
+  uint64_t d4190 = 19ULL * f14;
+  uint64_t d40 = 2ULL * d4190;
   FStar_UInt128_uint128
   s00 =
     FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f10, f10),
@@ -496,12 +491,12 @@ Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uin
   FStar_UInt128_uint128 o120 = s20;
   FStar_UInt128_uint128 o130 = s30;
   FStar_UInt128_uint128 o140 = s40;
-  uint64_t d0 = (uint64_t)2U * f20;
-  uint64_t d1 = (uint64_t)2U * f21;
-  uint64_t d2 = (uint64_t)38U * f22;
-  uint64_t d3 = (uint64_t)19U * f23;
-  uint64_t d419 = (uint64_t)19U * f24;
-  uint64_t d4 = (uint64_t)2U * d419;
+  uint64_t d0 = 2ULL * f20;
+  uint64_t d1 = 2ULL * f21;
+  uint64_t d2 = 38ULL * f22;
+  uint64_t d3 = 19ULL * f23;
+  uint64_t d419 = 19ULL * f24;
+  uint64_t d4 = 2ULL * d419;
   FStar_UInt128_uint128
   s0 =
     FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f20, f20),
@@ -532,49 +527,47 @@ Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uin
   FStar_UInt128_uint128 o220 = s2;
   FStar_UInt128_uint128 o230 = s3;
   FStar_UInt128_uint128 o240 = s4;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(o100, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(o100, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(o110, FStar_UInt128_uint64_to_uint128(c00));
-  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(o120, FStar_UInt128_uint64_to_uint128(c10));
-  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(o130, FStar_UInt128_uint64_to_uint128(c20));
-  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(o140, FStar_UInt128_uint64_to_uint128(c30));
-  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp00 + c40 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c50 = l_4 >> (uint32_t)51U;
+  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp00 + c40 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c50 = l_4 >> 51U;
   uint64_t o101 = tmp0_;
   uint64_t o111 = tmp10 + c50;
   uint64_t o121 = tmp20;
   uint64_t o131 = tmp30;
   uint64_t o141 = tmp40;
-  FStar_UInt128_uint128
-  l_5 = FStar_UInt128_add(o200, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, (uint32_t)51U));
+  FStar_UInt128_uint128 l_5 = FStar_UInt128_add(o200, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, 51U));
   FStar_UInt128_uint128 l_6 = FStar_UInt128_add(o210, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, 51U));
   FStar_UInt128_uint128 l_7 = FStar_UInt128_add(o220, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, 51U));
   FStar_UInt128_uint128 l_8 = FStar_UInt128_add(o230, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, 51U));
   FStar_UInt128_uint128 l_9 = FStar_UInt128_add(o240, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, (uint32_t)51U));
-  uint64_t l_10 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_0 = l_10 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_10 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, 51U));
+  uint64_t l_10 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_0 = l_10 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_10 >> 51U;
   uint64_t o201 = tmp0_0;
   uint64_t o211 = tmp1 + c5;
   uint64_t o221 = tmp2;
@@ -609,49 +602,49 @@ static inline void Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  uint64_t l_ = f0 + (uint64_t)0U;
-  uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = l_ >> (uint32_t)51U;
+  uint64_t l_ = f0 + 0ULL;
+  uint64_t tmp0 = l_ & 0x7ffffffffffffULL;
+  uint64_t c0 = l_ >> 51U;
   uint64_t l_0 = f1 + c0;
-  uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = l_0 >> (uint32_t)51U;
+  uint64_t tmp1 = l_0 & 0x7ffffffffffffULL;
+  uint64_t c1 = l_0 >> 51U;
   uint64_t l_1 = f2 + c1;
-  uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = l_1 >> (uint32_t)51U;
+  uint64_t tmp2 = l_1 & 0x7ffffffffffffULL;
+  uint64_t c2 = l_1 >> 51U;
   uint64_t l_2 = f3 + c2;
-  uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = l_2 >> (uint32_t)51U;
+  uint64_t tmp3 = l_2 & 0x7ffffffffffffULL;
+  uint64_t c3 = l_2 >> 51U;
   uint64_t l_3 = f4 + c3;
-  uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = l_3 >> (uint32_t)51U;
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = l_3 & 0x7ffffffffffffULL;
+  uint64_t c4 = l_3 >> 51U;
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t f01 = tmp0_;
   uint64_t f11 = tmp1 + c5;
   uint64_t f21 = tmp2;
   uint64_t f31 = tmp3;
   uint64_t f41 = tmp4;
-  uint64_t m0 = FStar_UInt64_gte_mask(f01, (uint64_t)0x7ffffffffffedU);
-  uint64_t m1 = FStar_UInt64_eq_mask(f11, (uint64_t)0x7ffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(f21, (uint64_t)0x7ffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(f31, (uint64_t)0x7ffffffffffffU);
-  uint64_t m4 = FStar_UInt64_eq_mask(f41, (uint64_t)0x7ffffffffffffU);
+  uint64_t m0 = FStar_UInt64_gte_mask(f01, 0x7ffffffffffedULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(f11, 0x7ffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(f21, 0x7ffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(f31, 0x7ffffffffffffULL);
+  uint64_t m4 = FStar_UInt64_eq_mask(f41, 0x7ffffffffffffULL);
   uint64_t mask = (((m0 & m1) & m2) & m3) & m4;
-  uint64_t f0_ = f01 - (mask & (uint64_t)0x7ffffffffffedU);
-  uint64_t f1_ = f11 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f2_ = f21 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f3_ = f31 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f4_ = f41 - (mask & (uint64_t)0x7ffffffffffffU);
+  uint64_t f0_ = f01 - (mask & 0x7ffffffffffedULL);
+  uint64_t f1_ = f11 - (mask & 0x7ffffffffffffULL);
+  uint64_t f2_ = f21 - (mask & 0x7ffffffffffffULL);
+  uint64_t f3_ = f31 - (mask & 0x7ffffffffffffULL);
+  uint64_t f4_ = f41 - (mask & 0x7ffffffffffffULL);
   uint64_t f02 = f0_;
   uint64_t f12 = f1_;
   uint64_t f22 = f2_;
   uint64_t f32 = f3_;
   uint64_t f42 = f4_;
-  uint64_t o00 = f02 | f12 << (uint32_t)51U;
-  uint64_t o10 = f12 >> (uint32_t)13U | f22 << (uint32_t)38U;
-  uint64_t o20 = f22 >> (uint32_t)26U | f32 << (uint32_t)25U;
-  uint64_t o30 = f32 >> (uint32_t)39U | f42 << (uint32_t)12U;
+  uint64_t o00 = f02 | f12 << 51U;
+  uint64_t o10 = f12 >> 13U | f22 << 38U;
+  uint64_t o20 = f22 >> 26U | f32 << 25U;
+  uint64_t o30 = f32 >> 39U | f42 << 12U;
   uint64_t o0 = o00;
   uint64_t o1 = o10;
   uint64_t o2 = o20;
@@ -665,11 +658,11 @@ static inline void Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint
 static inline void
 Hacl_Impl_Curve25519_Field51_cswap2(uint64_t bit, uint64_t *p1, uint64_t *p2)
 {
-  uint64_t mask = (uint64_t)0U - bit;
+  uint64_t mask = 0ULL - bit;
   KRML_MAYBE_FOR10(i,
-    (uint32_t)0U,
-    (uint32_t)10U,
-    (uint32_t)1U,
+    0U,
+    10U,
+    1U,
     uint64_t dummy = mask & (p1[i] ^ p2[i]);
     p1[i] = p1[i] ^ dummy;
     p2[i] = p2[i] ^ dummy;);
diff --git a/include/msvc/internal/Hacl_Bignum_Base.h b/include/msvc/internal/Hacl_Bignum_Base.h
index e4d35fe9..bafd4896 100644
--- a/include/msvc/internal/Hacl_Bignum_Base.h
+++ b/include/msvc/internal/Hacl_Bignum_Base.h
@@ -45,7 +45,7 @@ Hacl_Bignum_Base_mul_wide_add2_u32(uint32_t a, uint32_t b, uint32_t c_in, uint32
   uint32_t out0 = out[0U];
   uint64_t res = (uint64_t)a * (uint64_t)b + (uint64_t)c_in + (uint64_t)out0;
   out[0U] = (uint32_t)res;
-  return (uint32_t)(res >> (uint32_t)32U);
+  return (uint32_t)(res >> 32U);
 }
 
 static inline uint64_t
@@ -58,22 +58,22 @@ Hacl_Bignum_Base_mul_wide_add2_u64(uint64_t a, uint64_t b, uint64_t c_in, uint64
         FStar_UInt128_uint64_to_uint128(c_in)),
       FStar_UInt128_uint64_to_uint128(out0));
   out[0U] = FStar_UInt128_uint128_to_uint64(res);
-  return FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
+  return FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U));
 }
 
 static inline void
 Hacl_Bignum_Convert_bn_from_bytes_be_uint64(uint32_t len, uint8_t *b, uint64_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -82,24 +82,24 @@ Hacl_Bignum_Convert_bn_from_bytes_be_uint64(uint32_t len, uint8_t *b, uint64_t *
 static inline void
 Hacl_Bignum_Convert_bn_to_bytes_be_uint64(uint32_t len, uint64_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store64_be(tmp + i * (uint32_t)8U, b[bnLen - i - (uint32_t)1U]);
+    store64_be(tmp + i * 8U, b[bnLen - i - 1U]);
   }
   memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t));
 }
 
 static inline uint32_t Hacl_Bignum_Lib_bn_get_top_index_u32(uint32_t len, uint32_t *b)
 {
-  uint32_t priv = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t priv = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
-    uint32_t mask = FStar_UInt32_eq_mask(b[i], (uint32_t)0U);
+    uint32_t mask = FStar_UInt32_eq_mask(b[i], 0U);
     priv = (mask & priv) | (~mask & i);
   }
   return priv;
@@ -107,10 +107,10 @@ static inline uint32_t Hacl_Bignum_Lib_bn_get_top_index_u32(uint32_t len, uint32
 
 static inline uint64_t Hacl_Bignum_Lib_bn_get_top_index_u64(uint32_t len, uint64_t *b)
 {
-  uint64_t priv = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t priv = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
-    uint64_t mask = FStar_UInt64_eq_mask(b[i], (uint64_t)0U);
+    uint64_t mask = FStar_UInt64_eq_mask(b[i], 0ULL);
     priv = (mask & priv) | (~mask & (uint64_t)i);
   }
   return priv;
@@ -119,63 +119,63 @@ static inline uint64_t Hacl_Bignum_Lib_bn_get_top_index_u64(uint32_t len, uint64
 static inline uint32_t
 Hacl_Bignum_Lib_bn_get_bits_u32(uint32_t len, uint32_t *b, uint32_t i, uint32_t l)
 {
-  uint32_t i1 = i / (uint32_t)32U;
-  uint32_t j = i % (uint32_t)32U;
+  uint32_t i1 = i / 32U;
+  uint32_t j = i % 32U;
   uint32_t p1 = b[i1] >> j;
   uint32_t ite;
-  if (i1 + (uint32_t)1U < len && (uint32_t)0U < j)
+  if (i1 + 1U < len && 0U < j)
   {
-    ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)32U - j);
+    ite = p1 | b[i1 + 1U] << (32U - j);
   }
   else
   {
     ite = p1;
   }
-  return ite & (((uint32_t)1U << l) - (uint32_t)1U);
+  return ite & ((1U << l) - 1U);
 }
 
 static inline uint64_t
 Hacl_Bignum_Lib_bn_get_bits_u64(uint32_t len, uint64_t *b, uint32_t i, uint32_t l)
 {
-  uint32_t i1 = i / (uint32_t)64U;
-  uint32_t j = i % (uint32_t)64U;
+  uint32_t i1 = i / 64U;
+  uint32_t j = i % 64U;
   uint64_t p1 = b[i1] >> j;
   uint64_t ite;
-  if (i1 + (uint32_t)1U < len && (uint32_t)0U < j)
+  if (i1 + 1U < len && 0U < j)
   {
-    ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j);
+    ite = p1 | b[i1 + 1U] << (64U - j);
   }
   else
   {
     ite = p1;
   }
-  return ite & (((uint64_t)1U << l) - (uint64_t)1U);
+  return ite & ((1ULL << l) - 1ULL);
 }
 
 static inline uint32_t
 Hacl_Bignum_Addition_bn_sub_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -188,27 +188,27 @@ Hacl_Bignum_Addition_bn_sub_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b,
 static inline uint64_t
 Hacl_Bignum_Addition_bn_sub_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -221,27 +221,27 @@ Hacl_Bignum_Addition_bn_sub_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b,
 static inline uint32_t
 Hacl_Bignum_Addition_bn_add_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -254,27 +254,27 @@ Hacl_Bignum_Addition_bn_add_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b,
 static inline uint64_t
 Hacl_Bignum_Addition_bn_add_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -294,27 +294,27 @@ Hacl_Bignum_Multiplication_bn_mul_u32(
 )
 {
   memset(res, 0U, (aLen + bLen) * sizeof (uint32_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bLen; i0++)
+  for (uint32_t i0 = 0U; i0 < bLen; i0++)
   {
     uint32_t bj = b[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < aLen / 4U; i++)
     {
-      uint32_t a_i = a[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = a[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i0);
-      uint32_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = a[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c, res_i1);
-      uint32_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = a[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c, res_i2);
-      uint32_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = a[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c, res_i);
     }
-    for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+    for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
     {
       uint32_t a_i = a[i];
       uint32_t *res_i = res_j + i;
@@ -335,27 +335,27 @@ Hacl_Bignum_Multiplication_bn_mul_u64(
 )
 {
   memset(res, 0U, (aLen + bLen) * sizeof (uint64_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bLen; i0++)
+  for (uint32_t i0 = 0U; i0 < bLen; i0++)
   {
     uint64_t bj = b[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < aLen / 4U; i++)
     {
-      uint64_t a_i = a[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = a[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
-    for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+    for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
     {
       uint64_t a_i = a[i];
       uint64_t *res_i = res_j + i;
@@ -370,28 +370,28 @@ static inline void
 Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res)
 {
   memset(res, 0U, (aLen + aLen) * sizeof (uint32_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < aLen; i0++)
+  for (uint32_t i0 = 0U; i0 < aLen; i0++)
   {
     uint32_t *ab = a;
     uint32_t a_j = a[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint32_t a_i = ab[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = ab[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i0);
-      uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = ab[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c, res_i1);
-      uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = ab[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c, res_i2);
-      uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = ab[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint32_t a_i = ab[i];
       uint32_t *res_i = res_j + i;
@@ -401,48 +401,48 @@ Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res)
     res[i0 + i0] = r;
   }
   uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  KRML_MAYBE_UNUSED_VAR(c0);
   KRML_CHECK_SIZE(sizeof (uint32_t), aLen + aLen);
   uint32_t *tmp = (uint32_t *)alloca((aLen + aLen) * sizeof (uint32_t));
   memset(tmp, 0U, (aLen + aLen) * sizeof (uint32_t));
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     uint64_t res1 = (uint64_t)a[i] * (uint64_t)a[i];
-    uint32_t hi = (uint32_t)(res1 >> (uint32_t)32U);
+    uint32_t hi = (uint32_t)(res1 >> 32U);
     uint32_t lo = (uint32_t)res1;
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;
   }
   uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void
 Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res)
 {
   memset(res, 0U, (aLen + aLen) * sizeof (uint64_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < aLen; i0++)
+  for (uint32_t i0 = 0U; i0 < aLen; i0++)
   {
     uint64_t *ab = a;
     uint64_t a_j = a[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -452,20 +452,20 @@ Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res)
     res[i0 + i0] = r;
   }
   uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  KRML_MAYBE_UNUSED_VAR(c0);
   KRML_CHECK_SIZE(sizeof (uint64_t), aLen + aLen);
   uint64_t *tmp = (uint64_t *)alloca((aLen + aLen) * sizeof (uint64_t));
   memset(tmp, 0U, (aLen + aLen) * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;
   }
   uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 #if defined(__cplusplus)
diff --git a/include/msvc/internal/Hacl_Bignum_K256.h b/include/msvc/internal/Hacl_Bignum_K256.h
index 59aff176..fe72fffe 100644
--- a/include/msvc/internal/Hacl_Bignum_K256.h
+++ b/include/msvc/internal/Hacl_Bignum_K256.h
@@ -45,13 +45,7 @@ static inline bool Hacl_K256_Field_is_felem_zero_vartime(uint64_t *f)
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  return
-    f0
-    == (uint64_t)0U
-    && f1 == (uint64_t)0U
-    && f2 == (uint64_t)0U
-    && f3 == (uint64_t)0U
-    && f4 == (uint64_t)0U;
+  return f0 == 0ULL && f1 == 0ULL && f2 == 0ULL && f3 == 0ULL && f4 == 0ULL;
 }
 
 static inline bool Hacl_K256_Field_is_felem_eq_vartime(uint64_t *f1, uint64_t *f2)
@@ -76,42 +70,42 @@ static inline bool Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime(uint64_
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  if (f4 > (uint64_t)0U)
+  if (f4 > 0ULL)
   {
     return false;
   }
-  if (f3 > (uint64_t)0U)
+  if (f3 > 0ULL)
   {
     return false;
   }
-  if (f2 < (uint64_t)0x1455123U)
+  if (f2 < 0x1455123ULL)
   {
     return true;
   }
-  if (f2 > (uint64_t)0x1455123U)
+  if (f2 > 0x1455123ULL)
   {
     return false;
   }
-  if (f1 < (uint64_t)0x1950b75fc4402U)
+  if (f1 < 0x1950b75fc4402ULL)
   {
     return true;
   }
-  if (f1 > (uint64_t)0x1950b75fc4402U)
+  if (f1 > 0x1950b75fc4402ULL)
   {
     return false;
   }
-  return f0 < (uint64_t)0xda1722fc9baeeU;
+  return f0 < 0xda1722fc9baeeULL;
 }
 
 static inline void Hacl_K256_Field_load_felem(uint64_t *f, uint8_t *b)
 {
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = tmp;
-    uint8_t *bj = b + i * (uint32_t)8U;
+    uint8_t *bj = b + i * 8U;
     uint64_t u = load64_be(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -120,11 +114,11 @@ static inline void Hacl_K256_Field_load_felem(uint64_t *f, uint8_t *b)
   uint64_t s1 = tmp[2U];
   uint64_t s2 = tmp[1U];
   uint64_t s3 = tmp[0U];
-  uint64_t f00 = s0 & (uint64_t)0xfffffffffffffU;
-  uint64_t f10 = s0 >> (uint32_t)52U | (s1 & (uint64_t)0xffffffffffU) << (uint32_t)12U;
-  uint64_t f20 = s1 >> (uint32_t)40U | (s2 & (uint64_t)0xfffffffU) << (uint32_t)24U;
-  uint64_t f30 = s2 >> (uint32_t)28U | (s3 & (uint64_t)0xffffU) << (uint32_t)36U;
-  uint64_t f40 = s3 >> (uint32_t)16U;
+  uint64_t f00 = s0 & 0xfffffffffffffULL;
+  uint64_t f10 = s0 >> 52U | (s1 & 0xffffffffffULL) << 12U;
+  uint64_t f20 = s1 >> 40U | (s2 & 0xfffffffULL) << 24U;
+  uint64_t f30 = s2 >> 28U | (s3 & 0xffffULL) << 36U;
+  uint64_t f40 = s3 >> 16U;
   uint64_t f0 = f00;
   uint64_t f1 = f10;
   uint64_t f2 = f20;
@@ -148,11 +142,11 @@ static inline bool Hacl_K256_Field_load_felem_lt_prime_vartime(uint64_t *f, uint
   bool
   is_ge_p =
     f0
-    >= (uint64_t)0xffffefffffc2fU
-    && f1 == (uint64_t)0xfffffffffffffU
-    && f2 == (uint64_t)0xfffffffffffffU
-    && f3 == (uint64_t)0xfffffffffffffU
-    && f4 == (uint64_t)0xffffffffffffU;
+    >= 0xffffefffffc2fULL
+    && f1 == 0xfffffffffffffULL
+    && f2 == 0xfffffffffffffULL
+    && f3 == 0xfffffffffffffULL
+    && f4 == 0xffffffffffffULL;
   return !is_ge_p;
 }
 
@@ -164,10 +158,10 @@ static inline void Hacl_K256_Field_store_felem(uint8_t *b, uint64_t *f)
   uint64_t f20 = f[2U];
   uint64_t f30 = f[3U];
   uint64_t f4 = f[4U];
-  uint64_t o0 = f00 | f10 << (uint32_t)52U;
-  uint64_t o1 = f10 >> (uint32_t)12U | f20 << (uint32_t)40U;
-  uint64_t o2 = f20 >> (uint32_t)24U | f30 << (uint32_t)28U;
-  uint64_t o3 = f30 >> (uint32_t)36U | f4 << (uint32_t)16U;
+  uint64_t o0 = f00 | f10 << 52U;
+  uint64_t o1 = f10 >> 12U | f20 << 40U;
+  uint64_t o2 = f20 >> 24U | f30 << 28U;
+  uint64_t o3 = f30 >> 36U | f4 << 16U;
   uint64_t f0 = o0;
   uint64_t f1 = o1;
   uint64_t f2 = o2;
@@ -176,11 +170,7 @@ static inline void Hacl_K256_Field_store_felem(uint8_t *b, uint64_t *f)
   tmp[1U] = f2;
   tmp[2U] = f1;
   tmp[3U] = f0;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(b + i * (uint32_t)8U, tmp[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(b + i * 8U, tmp[i]););
 }
 
 static inline void Hacl_K256_Field_fmul_small_num(uint64_t *out, uint64_t *f, uint64_t num)
@@ -248,11 +238,11 @@ static inline void Hacl_K256_Field_fsub(uint64_t *out, uint64_t *f1, uint64_t *f
   uint64_t b2 = f2[2U];
   uint64_t b3 = f2[3U];
   uint64_t b4 = f2[4U];
-  uint64_t r00 = (uint64_t)9007190664804446U * x - b0;
-  uint64_t r10 = (uint64_t)9007199254740990U * x - b1;
-  uint64_t r20 = (uint64_t)9007199254740990U * x - b2;
-  uint64_t r30 = (uint64_t)9007199254740990U * x - b3;
-  uint64_t r40 = (uint64_t)562949953421310U * x - b4;
+  uint64_t r00 = 9007190664804446ULL * x - b0;
+  uint64_t r10 = 9007199254740990ULL * x - b1;
+  uint64_t r20 = 9007199254740990ULL * x - b2;
+  uint64_t r30 = 9007199254740990ULL * x - b3;
+  uint64_t r40 = 562949953421310ULL * x - b4;
   uint64_t r0 = r00;
   uint64_t r1 = r10;
   uint64_t r2 = r20;
@@ -287,7 +277,7 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
   uint64_t b2 = f2[2U];
   uint64_t b3 = f2[3U];
   uint64_t b4 = f2[4U];
-  uint64_t r = (uint64_t)0x1000003D10U;
+  uint64_t r = 0x1000003D10ULL;
   FStar_UInt128_uint128
   d0 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0,
@@ -298,9 +288,9 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
   FStar_UInt128_uint128 c0 = FStar_UInt128_mul_wide(a4, b4);
   FStar_UInt128_uint128
   d1 = FStar_UInt128_add_mod(d0, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(c0)));
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, (uint32_t)64U));
-  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, (uint32_t)52U);
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, 64U));
+  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, 52U);
   FStar_UInt128_uint128
   d3 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(d2,
@@ -309,12 +299,11 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
           FStar_UInt128_mul_wide(a2, b2)),
         FStar_UInt128_mul_wide(a3, b1)),
       FStar_UInt128_mul_wide(a4, b0));
-  FStar_UInt128_uint128
-  d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << (uint32_t)12U, c1));
-  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, (uint32_t)52U);
-  uint64_t tx = t4 >> (uint32_t)48U;
-  uint64_t t4_ = t4 & (uint64_t)0xffffffffffffU;
+  FStar_UInt128_uint128 d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << 12U, c1));
+  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, 52U);
+  uint64_t tx = t4 >> 48U;
+  uint64_t t4_ = t4 & 0xffffffffffffULL;
   FStar_UInt128_uint128 c2 = FStar_UInt128_mul_wide(a0, b0);
   FStar_UInt128_uint128
   d6 =
@@ -323,13 +312,12 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
           FStar_UInt128_mul_wide(a2, b3)),
         FStar_UInt128_mul_wide(a3, b2)),
       FStar_UInt128_mul_wide(a4, b1));
-  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, (uint32_t)52U);
-  uint64_t u0_ = tx | u0 << (uint32_t)4U;
-  FStar_UInt128_uint128
-  c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> (uint32_t)4U));
-  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, (uint32_t)52U);
+  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, 52U);
+  uint64_t u0_ = tx | u0 << 4U;
+  FStar_UInt128_uint128 c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> 4U));
+  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, 52U);
   FStar_UInt128_uint128
   c5 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(c4, FStar_UInt128_mul_wide(a0, b1)),
@@ -343,10 +331,10 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
   FStar_UInt128_uint128
   c6 =
     FStar_UInt128_add_mod(c5,
-      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & (uint64_t)0xfffffffffffffU, r));
-  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, (uint32_t)52U);
-  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, (uint32_t)52U);
+      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & 0xfffffffffffffULL, r));
+  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, 52U);
+  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, 52U);
   FStar_UInt128_uint128
   c8 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(c7,
@@ -359,16 +347,15 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
       FStar_UInt128_mul_wide(a4, b3));
   FStar_UInt128_uint128
   c9 = FStar_UInt128_add_mod(c8, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(d10)));
-  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, (uint32_t)64U));
-  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, (uint32_t)52U);
+  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, 64U));
+  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, 52U);
   FStar_UInt128_uint128
   c11 =
-    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10,
-        FStar_UInt128_mul_wide(r << (uint32_t)12U, d11)),
+    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10, FStar_UInt128_mul_wide(r << 12U, d11)),
       FStar_UInt128_uint64_to_uint128(t3));
-  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & (uint64_t)0xfffffffffffffU;
-  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, (uint32_t)52U));
+  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & 0xfffffffffffffULL;
+  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, 52U));
   uint64_t r4 = c12 + t4_;
   uint64_t f0 = r0;
   uint64_t f11 = r1;
@@ -389,43 +376,41 @@ static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f)
   uint64_t a2 = f[2U];
   uint64_t a3 = f[3U];
   uint64_t a4 = f[4U];
-  uint64_t r = (uint64_t)0x1000003D10U;
+  uint64_t r = 0x1000003D10ULL;
   FStar_UInt128_uint128
   d0 =
-    FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0 * (uint64_t)2U, a3),
-      FStar_UInt128_mul_wide(a1 * (uint64_t)2U, a2));
+    FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0 * 2ULL, a3),
+      FStar_UInt128_mul_wide(a1 * 2ULL, a2));
   FStar_UInt128_uint128 c0 = FStar_UInt128_mul_wide(a4, a4);
   FStar_UInt128_uint128
   d1 = FStar_UInt128_add_mod(d0, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(c0)));
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, (uint32_t)64U));
-  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, (uint32_t)52U);
-  uint64_t a41 = a4 * (uint64_t)2U;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, 64U));
+  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, 52U);
+  uint64_t a41 = a4 * 2ULL;
   FStar_UInt128_uint128
   d3 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(d2,
           FStar_UInt128_mul_wide(a0, a41)),
-        FStar_UInt128_mul_wide(a1 * (uint64_t)2U, a3)),
+        FStar_UInt128_mul_wide(a1 * 2ULL, a3)),
       FStar_UInt128_mul_wide(a2, a2));
-  FStar_UInt128_uint128
-  d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << (uint32_t)12U, c1));
-  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, (uint32_t)52U);
-  uint64_t tx = t4 >> (uint32_t)48U;
-  uint64_t t4_ = t4 & (uint64_t)0xffffffffffffU;
+  FStar_UInt128_uint128 d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << 12U, c1));
+  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, 52U);
+  uint64_t tx = t4 >> 48U;
+  uint64_t t4_ = t4 & 0xffffffffffffULL;
   FStar_UInt128_uint128 c2 = FStar_UInt128_mul_wide(a0, a0);
   FStar_UInt128_uint128
   d6 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(d5, FStar_UInt128_mul_wide(a1, a41)),
-      FStar_UInt128_mul_wide(a2 * (uint64_t)2U, a3));
-  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, (uint32_t)52U);
-  uint64_t u0_ = tx | u0 << (uint32_t)4U;
-  FStar_UInt128_uint128
-  c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> (uint32_t)4U));
-  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, (uint32_t)52U);
-  uint64_t a01 = a0 * (uint64_t)2U;
+      FStar_UInt128_mul_wide(a2 * 2ULL, a3));
+  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, 52U);
+  uint64_t u0_ = tx | u0 << 4U;
+  FStar_UInt128_uint128 c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> 4U));
+  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, 52U);
+  uint64_t a01 = a0 * 2ULL;
   FStar_UInt128_uint128 c5 = FStar_UInt128_add_mod(c4, FStar_UInt128_mul_wide(a01, a1));
   FStar_UInt128_uint128
   d8 =
@@ -434,10 +419,10 @@ static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f)
   FStar_UInt128_uint128
   c6 =
     FStar_UInt128_add_mod(c5,
-      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & (uint64_t)0xfffffffffffffU, r));
-  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, (uint32_t)52U);
-  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, (uint32_t)52U);
+      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & 0xfffffffffffffULL, r));
+  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, 52U);
+  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, 52U);
   FStar_UInt128_uint128
   c8 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(c7, FStar_UInt128_mul_wide(a01, a2)),
@@ -445,16 +430,15 @@ static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f)
   FStar_UInt128_uint128 d10 = FStar_UInt128_add_mod(d9, FStar_UInt128_mul_wide(a3, a41));
   FStar_UInt128_uint128
   c9 = FStar_UInt128_add_mod(c8, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(d10)));
-  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, (uint32_t)64U));
-  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, (uint32_t)52U);
+  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, 64U));
+  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, 52U);
   FStar_UInt128_uint128
   c11 =
-    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10,
-        FStar_UInt128_mul_wide(r << (uint32_t)12U, d11)),
+    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10, FStar_UInt128_mul_wide(r << 12U, d11)),
       FStar_UInt128_uint64_to_uint128(t3));
-  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & (uint64_t)0xfffffffffffffU;
-  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, (uint32_t)52U));
+  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & 0xfffffffffffffULL;
+  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, 52U));
   uint64_t r4 = c12 + t4_;
   uint64_t f0 = r0;
   uint64_t f1 = r1;
@@ -475,23 +459,23 @@ static inline void Hacl_K256_Field_fnormalize_weak(uint64_t *out, uint64_t *f)
   uint64_t t2 = f[2U];
   uint64_t t3 = f[3U];
   uint64_t t4 = f[4U];
-  uint64_t x0 = t4 >> (uint32_t)48U;
-  uint64_t t410 = t4 & (uint64_t)0xffffffffffffU;
+  uint64_t x0 = t4 >> 48U;
+  uint64_t t410 = t4 & 0xffffffffffffULL;
   uint64_t x = x0;
   uint64_t t01 = t0;
   uint64_t t11 = t1;
   uint64_t t21 = t2;
   uint64_t t31 = t3;
   uint64_t t41 = t410;
-  uint64_t t02 = t01 + x * (uint64_t)0x1000003D1U;
-  uint64_t t12 = t11 + (t02 >> (uint32_t)52U);
-  uint64_t t03 = t02 & (uint64_t)0xfffffffffffffU;
-  uint64_t t22 = t21 + (t12 >> (uint32_t)52U);
-  uint64_t t13 = t12 & (uint64_t)0xfffffffffffffU;
-  uint64_t t32 = t31 + (t22 >> (uint32_t)52U);
-  uint64_t t23 = t22 & (uint64_t)0xfffffffffffffU;
-  uint64_t t42 = t41 + (t32 >> (uint32_t)52U);
-  uint64_t t33 = t32 & (uint64_t)0xfffffffffffffU;
+  uint64_t t02 = t01 + x * 0x1000003D1ULL;
+  uint64_t t12 = t11 + (t02 >> 52U);
+  uint64_t t03 = t02 & 0xfffffffffffffULL;
+  uint64_t t22 = t21 + (t12 >> 52U);
+  uint64_t t13 = t12 & 0xfffffffffffffULL;
+  uint64_t t32 = t31 + (t22 >> 52U);
+  uint64_t t23 = t22 & 0xfffffffffffffULL;
+  uint64_t t42 = t41 + (t32 >> 52U);
+  uint64_t t33 = t32 & 0xfffffffffffffULL;
   uint64_t f0 = t03;
   uint64_t f1 = t13;
   uint64_t f2 = t23;
@@ -511,59 +495,59 @@ static inline void Hacl_K256_Field_fnormalize(uint64_t *out, uint64_t *f)
   uint64_t f20 = f[2U];
   uint64_t f30 = f[3U];
   uint64_t f40 = f[4U];
-  uint64_t x0 = f40 >> (uint32_t)48U;
-  uint64_t t40 = f40 & (uint64_t)0xffffffffffffU;
+  uint64_t x0 = f40 >> 48U;
+  uint64_t t40 = f40 & 0xffffffffffffULL;
   uint64_t x1 = x0;
   uint64_t t00 = f00;
   uint64_t t10 = f10;
   uint64_t t20 = f20;
   uint64_t t30 = f30;
   uint64_t t42 = t40;
-  uint64_t t01 = t00 + x1 * (uint64_t)0x1000003D1U;
-  uint64_t t110 = t10 + (t01 >> (uint32_t)52U);
-  uint64_t t020 = t01 & (uint64_t)0xfffffffffffffU;
-  uint64_t t210 = t20 + (t110 >> (uint32_t)52U);
-  uint64_t t120 = t110 & (uint64_t)0xfffffffffffffU;
-  uint64_t t310 = t30 + (t210 >> (uint32_t)52U);
-  uint64_t t220 = t210 & (uint64_t)0xfffffffffffffU;
-  uint64_t t410 = t42 + (t310 >> (uint32_t)52U);
-  uint64_t t320 = t310 & (uint64_t)0xfffffffffffffU;
+  uint64_t t01 = t00 + x1 * 0x1000003D1ULL;
+  uint64_t t110 = t10 + (t01 >> 52U);
+  uint64_t t020 = t01 & 0xfffffffffffffULL;
+  uint64_t t210 = t20 + (t110 >> 52U);
+  uint64_t t120 = t110 & 0xfffffffffffffULL;
+  uint64_t t310 = t30 + (t210 >> 52U);
+  uint64_t t220 = t210 & 0xfffffffffffffULL;
+  uint64_t t410 = t42 + (t310 >> 52U);
+  uint64_t t320 = t310 & 0xfffffffffffffULL;
   uint64_t t0 = t020;
   uint64_t t1 = t120;
   uint64_t t2 = t220;
   uint64_t t3 = t320;
   uint64_t t4 = t410;
-  uint64_t x2 = t4 >> (uint32_t)48U;
-  uint64_t t411 = t4 & (uint64_t)0xffffffffffffU;
+  uint64_t x2 = t4 >> 48U;
+  uint64_t t411 = t4 & 0xffffffffffffULL;
   uint64_t x = x2;
   uint64_t r0 = t0;
   uint64_t r1 = t1;
   uint64_t r2 = t2;
   uint64_t r3 = t3;
   uint64_t r4 = t411;
-  uint64_t m4 = FStar_UInt64_eq_mask(r4, (uint64_t)0xffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(r3, (uint64_t)0xfffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(r2, (uint64_t)0xfffffffffffffU);
-  uint64_t m1 = FStar_UInt64_eq_mask(r1, (uint64_t)0xfffffffffffffU);
-  uint64_t m0 = FStar_UInt64_gte_mask(r0, (uint64_t)0xffffefffffc2fU);
+  uint64_t m4 = FStar_UInt64_eq_mask(r4, 0xffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(r3, 0xfffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(r2, 0xfffffffffffffULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(r1, 0xfffffffffffffULL);
+  uint64_t m0 = FStar_UInt64_gte_mask(r0, 0xffffefffffc2fULL);
   uint64_t is_ge_p_m = (((m0 & m1) & m2) & m3) & m4;
-  uint64_t m_to_one = is_ge_p_m & (uint64_t)1U;
+  uint64_t m_to_one = is_ge_p_m & 1ULL;
   uint64_t x10 = m_to_one | x;
-  uint64_t t010 = r0 + x10 * (uint64_t)0x1000003D1U;
-  uint64_t t11 = r1 + (t010 >> (uint32_t)52U);
-  uint64_t t02 = t010 & (uint64_t)0xfffffffffffffU;
-  uint64_t t21 = r2 + (t11 >> (uint32_t)52U);
-  uint64_t t12 = t11 & (uint64_t)0xfffffffffffffU;
-  uint64_t t31 = r3 + (t21 >> (uint32_t)52U);
-  uint64_t t22 = t21 & (uint64_t)0xfffffffffffffU;
-  uint64_t t41 = r4 + (t31 >> (uint32_t)52U);
-  uint64_t t32 = t31 & (uint64_t)0xfffffffffffffU;
+  uint64_t t010 = r0 + x10 * 0x1000003D1ULL;
+  uint64_t t11 = r1 + (t010 >> 52U);
+  uint64_t t02 = t010 & 0xfffffffffffffULL;
+  uint64_t t21 = r2 + (t11 >> 52U);
+  uint64_t t12 = t11 & 0xfffffffffffffULL;
+  uint64_t t31 = r3 + (t21 >> 52U);
+  uint64_t t22 = t21 & 0xfffffffffffffULL;
+  uint64_t t41 = r4 + (t31 >> 52U);
+  uint64_t t32 = t31 & 0xfffffffffffffULL;
   uint64_t s0 = t02;
   uint64_t s1 = t12;
   uint64_t s2 = t22;
   uint64_t s3 = t32;
   uint64_t s4 = t41;
-  uint64_t t412 = s4 & (uint64_t)0xffffffffffffU;
+  uint64_t t412 = s4 & 0xffffffffffffULL;
   uint64_t k0 = s0;
   uint64_t k1 = s1;
   uint64_t k2 = s2;
@@ -590,11 +574,11 @@ static inline void Hacl_K256_Field_fnegate_conditional_vartime(uint64_t *f, bool
     uint64_t a2 = f[2U];
     uint64_t a3 = f[3U];
     uint64_t a4 = f[4U];
-    uint64_t r0 = (uint64_t)9007190664804446U - a0;
-    uint64_t r1 = (uint64_t)9007199254740990U - a1;
-    uint64_t r2 = (uint64_t)9007199254740990U - a2;
-    uint64_t r3 = (uint64_t)9007199254740990U - a3;
-    uint64_t r4 = (uint64_t)562949953421310U - a4;
+    uint64_t r0 = 9007190664804446ULL - a0;
+    uint64_t r1 = 9007199254740990ULL - a1;
+    uint64_t r2 = 9007199254740990ULL - a2;
+    uint64_t r3 = 9007199254740990ULL - a3;
+    uint64_t r4 = 562949953421310ULL - a4;
     uint64_t f0 = r0;
     uint64_t f1 = r1;
     uint64_t f2 = r2;
@@ -612,7 +596,7 @@ static inline void Hacl_K256_Field_fnegate_conditional_vartime(uint64_t *f, bool
 
 static inline void Hacl_Impl_K256_Finv_fsquare_times_in_place(uint64_t *out, uint32_t b)
 {
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  for (uint32_t i = 0U; i < b; i++)
   {
     Hacl_K256_Field_fsqr(out, out);
   }
@@ -620,8 +604,8 @@ static inline void Hacl_Impl_K256_Finv_fsquare_times_in_place(uint64_t *out, uin
 
 static inline void Hacl_Impl_K256_Finv_fsquare_times(uint64_t *out, uint64_t *a, uint32_t b)
 {
-  memcpy(out, a, (uint32_t)5U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  memcpy(out, a, 5U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < b; i++)
   {
     Hacl_K256_Field_fsqr(out, out);
   }
@@ -633,29 +617,29 @@ static inline void Hacl_Impl_K256_Finv_fexp_223_23(uint64_t *out, uint64_t *x2,
   uint64_t x22[5U] = { 0U };
   uint64_t x44[5U] = { 0U };
   uint64_t x88[5U] = { 0U };
-  Hacl_Impl_K256_Finv_fsquare_times(x2, f, (uint32_t)1U);
+  Hacl_Impl_K256_Finv_fsquare_times(x2, f, 1U);
   Hacl_K256_Field_fmul(x2, x2, f);
-  Hacl_Impl_K256_Finv_fsquare_times(x3, x2, (uint32_t)1U);
+  Hacl_Impl_K256_Finv_fsquare_times(x3, x2, 1U);
   Hacl_K256_Field_fmul(x3, x3, f);
-  Hacl_Impl_K256_Finv_fsquare_times(out, x3, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times(out, x3, 3U);
   Hacl_K256_Field_fmul(out, out, x3);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U);
   Hacl_K256_Field_fmul(out, out, x3);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U);
   Hacl_K256_Field_fmul(out, out, x2);
-  Hacl_Impl_K256_Finv_fsquare_times(x22, out, (uint32_t)11U);
+  Hacl_Impl_K256_Finv_fsquare_times(x22, out, 11U);
   Hacl_K256_Field_fmul(x22, x22, out);
-  Hacl_Impl_K256_Finv_fsquare_times(x44, x22, (uint32_t)22U);
+  Hacl_Impl_K256_Finv_fsquare_times(x44, x22, 22U);
   Hacl_K256_Field_fmul(x44, x44, x22);
-  Hacl_Impl_K256_Finv_fsquare_times(x88, x44, (uint32_t)44U);
+  Hacl_Impl_K256_Finv_fsquare_times(x88, x44, 44U);
   Hacl_K256_Field_fmul(x88, x88, x44);
-  Hacl_Impl_K256_Finv_fsquare_times(out, x88, (uint32_t)88U);
+  Hacl_Impl_K256_Finv_fsquare_times(out, x88, 88U);
   Hacl_K256_Field_fmul(out, out, x88);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)44U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 44U);
   Hacl_K256_Field_fmul(out, out, x44);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U);
   Hacl_K256_Field_fmul(out, out, x3);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)23U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 23U);
   Hacl_K256_Field_fmul(out, out, x22);
 }
 
@@ -663,11 +647,11 @@ static inline void Hacl_Impl_K256_Finv_finv(uint64_t *out, uint64_t *f)
 {
   uint64_t x2[5U] = { 0U };
   Hacl_Impl_K256_Finv_fexp_223_23(out, x2, f);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)5U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 5U);
   Hacl_K256_Field_fmul(out, out, f);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U);
   Hacl_K256_Field_fmul(out, out, x2);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U);
   Hacl_K256_Field_fmul(out, out, f);
 }
 
@@ -675,9 +659,9 @@ static inline void Hacl_Impl_K256_Finv_fsqrt(uint64_t *out, uint64_t *f)
 {
   uint64_t x2[5U] = { 0U };
   Hacl_Impl_K256_Finv_fexp_223_23(out, x2, f);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)6U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 6U);
   Hacl_K256_Field_fmul(out, out, x2);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U);
 }
 
 #if defined(__cplusplus)
diff --git a/include/msvc/internal/Hacl_Ed25519_PrecompTable.h b/include/msvc/internal/Hacl_Ed25519_PrecompTable.h
index 77d2244c..a20cd912 100644
--- a/include/msvc/internal/Hacl_Ed25519_PrecompTable.h
+++ b/include/msvc/internal/Hacl_Ed25519_PrecompTable.h
@@ -39,655 +39,491 @@ static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)1738742601995546U, (uint64_t)1146398526822698U,
-    (uint64_t)2070867633025821U, (uint64_t)562264141797630U, (uint64_t)587772402128613U,
-    (uint64_t)1801439850948184U, (uint64_t)1351079888211148U, (uint64_t)450359962737049U,
-    (uint64_t)900719925474099U, (uint64_t)1801439850948198U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1841354044333475U,
-    (uint64_t)16398895984059U, (uint64_t)755974180946558U, (uint64_t)900171276175154U,
-    (uint64_t)1821297809914039U, (uint64_t)1661154287933054U, (uint64_t)284530020860578U,
-    (uint64_t)1390261174866914U, (uint64_t)1524110943907984U, (uint64_t)1045603498418422U,
-    (uint64_t)928651508580478U, (uint64_t)1383326941296346U, (uint64_t)961937908925785U,
-    (uint64_t)80455759693706U, (uint64_t)904734540352947U, (uint64_t)1507481815385608U,
-    (uint64_t)2223447444246085U, (uint64_t)1083941587175919U, (uint64_t)2059929906842505U,
-    (uint64_t)1581435440146976U, (uint64_t)782730187692425U, (uint64_t)9928394897574U,
-    (uint64_t)1539449519985236U, (uint64_t)1923587931078510U, (uint64_t)552919286076056U,
-    (uint64_t)376925408065760U, (uint64_t)447320488831784U, (uint64_t)1362918338468019U,
-    (uint64_t)1470031896696846U, (uint64_t)2189796996539902U, (uint64_t)1337552949959847U,
-    (uint64_t)1762287177775726U, (uint64_t)237994495816815U, (uint64_t)1277840395970544U,
-    (uint64_t)543972849007241U, (uint64_t)1224692671618814U, (uint64_t)162359533289271U,
-    (uint64_t)282240927125249U, (uint64_t)586909166382289U, (uint64_t)17726488197838U,
-    (uint64_t)377014554985659U, (uint64_t)1433835303052512U, (uint64_t)702061469493692U,
-    (uint64_t)1142253108318154U, (uint64_t)318297794307551U, (uint64_t)954362646308543U,
-    (uint64_t)517363881452320U, (uint64_t)1868013482130416U, (uint64_t)262562472373260U,
-    (uint64_t)902232853249919U, (uint64_t)2107343057055746U, (uint64_t)462368348619024U,
-    (uint64_t)1893758677092974U, (uint64_t)2177729767846389U, (uint64_t)2168532543559143U,
-    (uint64_t)443867094639821U, (uint64_t)730169342581022U, (uint64_t)1564589016879755U,
-    (uint64_t)51218195700649U, (uint64_t)76684578423745U, (uint64_t)560266272480743U,
-    (uint64_t)922517457707697U, (uint64_t)2066645939860874U, (uint64_t)1318277348414638U,
-    (uint64_t)1576726809084003U, (uint64_t)1817337608563665U, (uint64_t)1874240939237666U,
-    (uint64_t)754733726333910U, (uint64_t)97085310406474U, (uint64_t)751148364309235U,
-    (uint64_t)1622159695715187U, (uint64_t)1444098819684916U, (uint64_t)130920805558089U,
-    (uint64_t)1260449179085308U, (uint64_t)1860021740768461U, (uint64_t)110052860348509U,
-    (uint64_t)193830891643810U, (uint64_t)164148413933881U, (uint64_t)180017794795332U,
-    (uint64_t)1523506525254651U, (uint64_t)465981629225956U, (uint64_t)559733514964572U,
-    (uint64_t)1279624874416974U, (uint64_t)2026642326892306U, (uint64_t)1425156829982409U,
-    (uint64_t)2160936383793147U, (uint64_t)1061870624975247U, (uint64_t)2023497043036941U,
-    (uint64_t)117942212883190U, (uint64_t)490339622800774U, (uint64_t)1729931303146295U,
-    (uint64_t)422305932971074U, (uint64_t)529103152793096U, (uint64_t)1211973233775992U,
-    (uint64_t)721364955929681U, (uint64_t)1497674430438813U, (uint64_t)342545521275073U,
-    (uint64_t)2102107575279372U, (uint64_t)2108462244669966U, (uint64_t)1382582406064082U,
-    (uint64_t)2206396818383323U, (uint64_t)2109093268641147U, (uint64_t)10809845110983U,
-    (uint64_t)1605176920880099U, (uint64_t)744640650753946U, (uint64_t)1712758897518129U,
-    (uint64_t)373410811281809U, (uint64_t)648838265800209U, (uint64_t)813058095530999U,
-    (uint64_t)513987632620169U, (uint64_t)465516160703329U, (uint64_t)2136322186126330U,
-    (uint64_t)1979645899422932U, (uint64_t)1197131006470786U, (uint64_t)1467836664863979U,
-    (uint64_t)1340751381374628U, (uint64_t)1810066212667962U, (uint64_t)1009933588225499U,
-    (uint64_t)1106129188080873U, (uint64_t)1388980405213901U, (uint64_t)533719246598044U,
-    (uint64_t)1169435803073277U, (uint64_t)198920999285821U, (uint64_t)487492330629854U,
-    (uint64_t)1807093008537778U, (uint64_t)1540899012923865U, (uint64_t)2075080271659867U,
-    (uint64_t)1527990806921523U, (uint64_t)1323728742908002U, (uint64_t)1568595959608205U,
-    (uint64_t)1388032187497212U, (uint64_t)2026968840050568U, (uint64_t)1396591153295755U,
-    (uint64_t)820416950170901U, (uint64_t)520060313205582U, (uint64_t)2016404325094901U,
-    (uint64_t)1584709677868520U, (uint64_t)272161374469956U, (uint64_t)1567188603996816U,
-    (uint64_t)1986160530078221U, (uint64_t)553930264324589U, (uint64_t)1058426729027503U,
-    (uint64_t)8762762886675U, (uint64_t)2216098143382988U, (uint64_t)1835145266889223U,
-    (uint64_t)1712936431558441U, (uint64_t)1017009937844974U, (uint64_t)585361667812740U,
-    (uint64_t)2114711541628181U, (uint64_t)2238729632971439U, (uint64_t)121257546253072U,
-    (uint64_t)847154149018345U, (uint64_t)211972965476684U, (uint64_t)287499084460129U,
-    (uint64_t)2098247259180197U, (uint64_t)839070411583329U, (uint64_t)339551619574372U,
-    (uint64_t)1432951287640743U, (uint64_t)526481249498942U, (uint64_t)931991661905195U,
-    (uint64_t)1884279965674487U, (uint64_t)200486405604411U, (uint64_t)364173020594788U,
-    (uint64_t)518034455936955U, (uint64_t)1085564703965501U, (uint64_t)16030410467927U,
-    (uint64_t)604865933167613U, (uint64_t)1695298441093964U, (uint64_t)498856548116159U,
-    (uint64_t)2193030062787034U, (uint64_t)1706339802964179U, (uint64_t)1721199073493888U,
-    (uint64_t)820740951039755U, (uint64_t)1216053436896834U, (uint64_t)23954895815139U,
-    (uint64_t)1662515208920491U, (uint64_t)1705443427511899U, (uint64_t)1957928899570365U,
-    (uint64_t)1189636258255725U, (uint64_t)1795695471103809U, (uint64_t)1691191297654118U,
-    (uint64_t)282402585374360U, (uint64_t)460405330264832U, (uint64_t)63765529445733U,
-    (uint64_t)469763447404473U, (uint64_t)733607089694996U, (uint64_t)685410420186959U,
-    (uint64_t)1096682630419738U, (uint64_t)1162548510542362U, (uint64_t)1020949526456676U,
-    (uint64_t)1211660396870573U, (uint64_t)613126398222696U, (uint64_t)1117829165843251U,
-    (uint64_t)742432540886650U, (uint64_t)1483755088010658U, (uint64_t)942392007134474U,
-    (uint64_t)1447834130944107U, (uint64_t)489368274863410U, (uint64_t)23192985544898U,
-    (uint64_t)648442406146160U, (uint64_t)785438843373876U, (uint64_t)249464684645238U,
-    (uint64_t)170494608205618U, (uint64_t)335112827260550U, (uint64_t)1462050123162735U,
-    (uint64_t)1084803668439016U, (uint64_t)853459233600325U, (uint64_t)215777728187495U,
-    (uint64_t)1965759433526974U, (uint64_t)1349482894446537U, (uint64_t)694163317612871U,
-    (uint64_t)860536766165036U, (uint64_t)1178788094084321U, (uint64_t)1652739626626996U,
-    (uint64_t)2115723946388185U, (uint64_t)1577204379094664U, (uint64_t)1083882859023240U,
-    (uint64_t)1768759143381635U, (uint64_t)1737180992507258U, (uint64_t)246054513922239U,
-    (uint64_t)577253134087234U, (uint64_t)356340280578042U, (uint64_t)1638917769925142U,
-    (uint64_t)223550348130103U, (uint64_t)470592666638765U, (uint64_t)22663573966996U,
-    (uint64_t)596552461152400U, (uint64_t)364143537069499U, (uint64_t)3942119457699U,
-    (uint64_t)107951982889287U, (uint64_t)1843471406713209U, (uint64_t)1625773041610986U,
-    (uint64_t)1466141092501702U, (uint64_t)1043024095021271U, (uint64_t)310429964047508U,
-    (uint64_t)98559121500372U, (uint64_t)152746933782868U, (uint64_t)259407205078261U,
-    (uint64_t)828123093322585U, (uint64_t)1576847274280091U, (uint64_t)1170871375757302U,
-    (uint64_t)1588856194642775U, (uint64_t)984767822341977U, (uint64_t)1141497997993760U,
-    (uint64_t)809325345150796U, (uint64_t)1879837728202511U, (uint64_t)201340910657893U,
-    (uint64_t)1079157558888483U, (uint64_t)1052373448588065U, (uint64_t)1732036202501778U,
-    (uint64_t)2105292670328445U, (uint64_t)679751387312402U, (uint64_t)1679682144926229U,
-    (uint64_t)1695823455818780U, (uint64_t)498852317075849U, (uint64_t)1786555067788433U,
-    (uint64_t)1670727545779425U, (uint64_t)117945875433544U, (uint64_t)407939139781844U,
-    (uint64_t)854632120023778U, (uint64_t)1413383148360437U, (uint64_t)286030901733673U,
-    (uint64_t)1207361858071196U, (uint64_t)461340408181417U, (uint64_t)1096919590360164U,
-    (uint64_t)1837594897475685U, (uint64_t)533755561544165U, (uint64_t)1638688042247712U,
-    (uint64_t)1431653684793005U, (uint64_t)1036458538873559U, (uint64_t)390822120341779U,
-    (uint64_t)1920929837111618U, (uint64_t)543426740024168U, (uint64_t)645751357799929U,
-    (uint64_t)2245025632994463U, (uint64_t)1550778638076452U, (uint64_t)223738153459949U,
-    (uint64_t)1337209385492033U, (uint64_t)1276967236456531U, (uint64_t)1463815821063071U,
-    (uint64_t)2070620870191473U, (uint64_t)1199170709413753U, (uint64_t)273230877394166U,
-    (uint64_t)1873264887608046U, (uint64_t)890877152910775U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1738742601995546ULL, 1146398526822698ULL, 2070867633025821ULL,
+    562264141797630ULL, 587772402128613ULL, 1801439850948184ULL, 1351079888211148ULL,
+    450359962737049ULL, 900719925474099ULL, 1801439850948198ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    1841354044333475ULL, 16398895984059ULL, 755974180946558ULL, 900171276175154ULL,
+    1821297809914039ULL, 1661154287933054ULL, 284530020860578ULL, 1390261174866914ULL,
+    1524110943907984ULL, 1045603498418422ULL, 928651508580478ULL, 1383326941296346ULL,
+    961937908925785ULL, 80455759693706ULL, 904734540352947ULL, 1507481815385608ULL,
+    2223447444246085ULL, 1083941587175919ULL, 2059929906842505ULL, 1581435440146976ULL,
+    782730187692425ULL, 9928394897574ULL, 1539449519985236ULL, 1923587931078510ULL,
+    552919286076056ULL, 376925408065760ULL, 447320488831784ULL, 1362918338468019ULL,
+    1470031896696846ULL, 2189796996539902ULL, 1337552949959847ULL, 1762287177775726ULL,
+    237994495816815ULL, 1277840395970544ULL, 543972849007241ULL, 1224692671618814ULL,
+    162359533289271ULL, 282240927125249ULL, 586909166382289ULL, 17726488197838ULL,
+    377014554985659ULL, 1433835303052512ULL, 702061469493692ULL, 1142253108318154ULL,
+    318297794307551ULL, 954362646308543ULL, 517363881452320ULL, 1868013482130416ULL,
+    262562472373260ULL, 902232853249919ULL, 2107343057055746ULL, 462368348619024ULL,
+    1893758677092974ULL, 2177729767846389ULL, 2168532543559143ULL, 443867094639821ULL,
+    730169342581022ULL, 1564589016879755ULL, 51218195700649ULL, 76684578423745ULL,
+    560266272480743ULL, 922517457707697ULL, 2066645939860874ULL, 1318277348414638ULL,
+    1576726809084003ULL, 1817337608563665ULL, 1874240939237666ULL, 754733726333910ULL,
+    97085310406474ULL, 751148364309235ULL, 1622159695715187ULL, 1444098819684916ULL,
+    130920805558089ULL, 1260449179085308ULL, 1860021740768461ULL, 110052860348509ULL,
+    193830891643810ULL, 164148413933881ULL, 180017794795332ULL, 1523506525254651ULL,
+    465981629225956ULL, 559733514964572ULL, 1279624874416974ULL, 2026642326892306ULL,
+    1425156829982409ULL, 2160936383793147ULL, 1061870624975247ULL, 2023497043036941ULL,
+    117942212883190ULL, 490339622800774ULL, 1729931303146295ULL, 422305932971074ULL,
+    529103152793096ULL, 1211973233775992ULL, 721364955929681ULL, 1497674430438813ULL,
+    342545521275073ULL, 2102107575279372ULL, 2108462244669966ULL, 1382582406064082ULL,
+    2206396818383323ULL, 2109093268641147ULL, 10809845110983ULL, 1605176920880099ULL,
+    744640650753946ULL, 1712758897518129ULL, 373410811281809ULL, 648838265800209ULL,
+    813058095530999ULL, 513987632620169ULL, 465516160703329ULL, 2136322186126330ULL,
+    1979645899422932ULL, 1197131006470786ULL, 1467836664863979ULL, 1340751381374628ULL,
+    1810066212667962ULL, 1009933588225499ULL, 1106129188080873ULL, 1388980405213901ULL,
+    533719246598044ULL, 1169435803073277ULL, 198920999285821ULL, 487492330629854ULL,
+    1807093008537778ULL, 1540899012923865ULL, 2075080271659867ULL, 1527990806921523ULL,
+    1323728742908002ULL, 1568595959608205ULL, 1388032187497212ULL, 2026968840050568ULL,
+    1396591153295755ULL, 820416950170901ULL, 520060313205582ULL, 2016404325094901ULL,
+    1584709677868520ULL, 272161374469956ULL, 1567188603996816ULL, 1986160530078221ULL,
+    553930264324589ULL, 1058426729027503ULL, 8762762886675ULL, 2216098143382988ULL,
+    1835145266889223ULL, 1712936431558441ULL, 1017009937844974ULL, 585361667812740ULL,
+    2114711541628181ULL, 2238729632971439ULL, 121257546253072ULL, 847154149018345ULL,
+    211972965476684ULL, 287499084460129ULL, 2098247259180197ULL, 839070411583329ULL,
+    339551619574372ULL, 1432951287640743ULL, 526481249498942ULL, 931991661905195ULL,
+    1884279965674487ULL, 200486405604411ULL, 364173020594788ULL, 518034455936955ULL,
+    1085564703965501ULL, 16030410467927ULL, 604865933167613ULL, 1695298441093964ULL,
+    498856548116159ULL, 2193030062787034ULL, 1706339802964179ULL, 1721199073493888ULL,
+    820740951039755ULL, 1216053436896834ULL, 23954895815139ULL, 1662515208920491ULL,
+    1705443427511899ULL, 1957928899570365ULL, 1189636258255725ULL, 1795695471103809ULL,
+    1691191297654118ULL, 282402585374360ULL, 460405330264832ULL, 63765529445733ULL,
+    469763447404473ULL, 733607089694996ULL, 685410420186959ULL, 1096682630419738ULL,
+    1162548510542362ULL, 1020949526456676ULL, 1211660396870573ULL, 613126398222696ULL,
+    1117829165843251ULL, 742432540886650ULL, 1483755088010658ULL, 942392007134474ULL,
+    1447834130944107ULL, 489368274863410ULL, 23192985544898ULL, 648442406146160ULL,
+    785438843373876ULL, 249464684645238ULL, 170494608205618ULL, 335112827260550ULL,
+    1462050123162735ULL, 1084803668439016ULL, 853459233600325ULL, 215777728187495ULL,
+    1965759433526974ULL, 1349482894446537ULL, 694163317612871ULL, 860536766165036ULL,
+    1178788094084321ULL, 1652739626626996ULL, 2115723946388185ULL, 1577204379094664ULL,
+    1083882859023240ULL, 1768759143381635ULL, 1737180992507258ULL, 246054513922239ULL,
+    577253134087234ULL, 356340280578042ULL, 1638917769925142ULL, 223550348130103ULL,
+    470592666638765ULL, 22663573966996ULL, 596552461152400ULL, 364143537069499ULL, 3942119457699ULL,
+    107951982889287ULL, 1843471406713209ULL, 1625773041610986ULL, 1466141092501702ULL,
+    1043024095021271ULL, 310429964047508ULL, 98559121500372ULL, 152746933782868ULL,
+    259407205078261ULL, 828123093322585ULL, 1576847274280091ULL, 1170871375757302ULL,
+    1588856194642775ULL, 984767822341977ULL, 1141497997993760ULL, 809325345150796ULL,
+    1879837728202511ULL, 201340910657893ULL, 1079157558888483ULL, 1052373448588065ULL,
+    1732036202501778ULL, 2105292670328445ULL, 679751387312402ULL, 1679682144926229ULL,
+    1695823455818780ULL, 498852317075849ULL, 1786555067788433ULL, 1670727545779425ULL,
+    117945875433544ULL, 407939139781844ULL, 854632120023778ULL, 1413383148360437ULL,
+    286030901733673ULL, 1207361858071196ULL, 461340408181417ULL, 1096919590360164ULL,
+    1837594897475685ULL, 533755561544165ULL, 1638688042247712ULL, 1431653684793005ULL,
+    1036458538873559ULL, 390822120341779ULL, 1920929837111618ULL, 543426740024168ULL,
+    645751357799929ULL, 2245025632994463ULL, 1550778638076452ULL, 223738153459949ULL,
+    1337209385492033ULL, 1276967236456531ULL, 1463815821063071ULL, 2070620870191473ULL,
+    1199170709413753ULL, 273230877394166ULL, 1873264887608046ULL, 890877152910775ULL
   };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_g_pow2_64_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)13559344787725U, (uint64_t)2051621493703448U,
-    (uint64_t)1947659315640708U, (uint64_t)626856790370168U, (uint64_t)1592804284034836U,
-    (uint64_t)1781728767459187U, (uint64_t)278818420518009U, (uint64_t)2038030359908351U,
-    (uint64_t)910625973862690U, (uint64_t)471887343142239U, (uint64_t)1298543306606048U,
-    (uint64_t)794147365642417U, (uint64_t)129968992326749U, (uint64_t)523140861678572U,
-    (uint64_t)1166419653909231U, (uint64_t)2009637196928390U, (uint64_t)1288020222395193U,
-    (uint64_t)1007046974985829U, (uint64_t)208981102651386U, (uint64_t)2074009315253380U,
-    (uint64_t)1564056062071967U, (uint64_t)276822668750618U, (uint64_t)206621292512572U,
-    (uint64_t)470304361809269U, (uint64_t)895215438398493U, (uint64_t)1527859053868686U,
-    (uint64_t)1624967223409369U, (uint64_t)811821865979736U, (uint64_t)350450534838340U,
-    (uint64_t)219143807921807U, (uint64_t)507994540371254U, (uint64_t)986513794574720U,
-    (uint64_t)1142661369967121U, (uint64_t)621278293399257U, (uint64_t)556189161519781U,
-    (uint64_t)351964007865066U, (uint64_t)2011573453777822U, (uint64_t)1367125527151537U,
-    (uint64_t)1691316722438196U, (uint64_t)731328817345164U, (uint64_t)1284781192709232U,
-    (uint64_t)478439299539269U, (uint64_t)204842178076429U, (uint64_t)2085125369913651U,
-    (uint64_t)1980773492792985U, (uint64_t)1480264409524940U, (uint64_t)688389585376233U,
-    (uint64_t)612962643526972U, (uint64_t)165595382536676U, (uint64_t)1850300069212263U,
-    (uint64_t)1176357203491551U, (uint64_t)1880164984292321U, (uint64_t)10786153104736U,
-    (uint64_t)1242293560510203U, (uint64_t)1358399951884084U, (uint64_t)1901358796610357U,
-    (uint64_t)1385092558795806U, (uint64_t)1734893785311348U, (uint64_t)2046201851951191U,
-    (uint64_t)1233811309557352U, (uint64_t)1531160168656129U, (uint64_t)1543287181303358U,
-    (uint64_t)516121446374119U, (uint64_t)723422668089935U, (uint64_t)1228176774959679U,
-    (uint64_t)1598014722726267U, (uint64_t)1630810326658412U, (uint64_t)1343833067463760U,
-    (uint64_t)1024397964362099U, (uint64_t)1157142161346781U, (uint64_t)56422174971792U,
-    (uint64_t)544901687297092U, (uint64_t)1291559028869009U, (uint64_t)1336918672345120U,
-    (uint64_t)1390874603281353U, (uint64_t)1127199512010904U, (uint64_t)992644979940964U,
-    (uint64_t)1035213479783573U, (uint64_t)36043651196100U, (uint64_t)1220961519321221U,
-    (uint64_t)1348190007756977U, (uint64_t)579420200329088U, (uint64_t)1703819961008985U,
-    (uint64_t)1993919213460047U, (uint64_t)2225080008232251U, (uint64_t)392785893702372U,
-    (uint64_t)464312521482632U, (uint64_t)1224525362116057U, (uint64_t)810394248933036U,
-    (uint64_t)932513521649107U, (uint64_t)592314953488703U, (uint64_t)586334603791548U,
-    (uint64_t)1310888126096549U, (uint64_t)650842674074281U, (uint64_t)1596447001791059U,
-    (uint64_t)2086767406328284U, (uint64_t)1866377645879940U, (uint64_t)1721604362642743U,
-    (uint64_t)738502322566890U, (uint64_t)1851901097729689U, (uint64_t)1158347571686914U,
-    (uint64_t)2023626733470827U, (uint64_t)329625404653699U, (uint64_t)563555875598551U,
-    (uint64_t)516554588079177U, (uint64_t)1134688306104598U, (uint64_t)186301198420809U,
-    (uint64_t)1339952213563300U, (uint64_t)643605614625891U, (uint64_t)1947505332718043U,
-    (uint64_t)1722071694852824U, (uint64_t)601679570440694U, (uint64_t)1821275721236351U,
-    (uint64_t)1808307842870389U, (uint64_t)1654165204015635U, (uint64_t)1457334100715245U,
-    (uint64_t)217784948678349U, (uint64_t)1820622417674817U, (uint64_t)1946121178444661U,
-    (uint64_t)597980757799332U, (uint64_t)1745271227710764U, (uint64_t)2010952890941980U,
-    (uint64_t)339811849696648U, (uint64_t)1066120666993872U, (uint64_t)261276166508990U,
-    (uint64_t)323098645774553U, (uint64_t)207454744271283U, (uint64_t)941448672977675U,
-    (uint64_t)71890920544375U, (uint64_t)840849789313357U, (uint64_t)1223996070717926U,
-    (uint64_t)196832550853408U, (uint64_t)115986818309231U, (uint64_t)1586171527267675U,
-    (uint64_t)1666169080973450U, (uint64_t)1456454731176365U, (uint64_t)44467854369003U,
-    (uint64_t)2149656190691480U, (uint64_t)283446383597589U, (uint64_t)2040542647729974U,
-    (uint64_t)305705593840224U, (uint64_t)475315822269791U, (uint64_t)648133452550632U,
-    (uint64_t)169218658835720U, (uint64_t)24960052338251U, (uint64_t)938907951346766U,
-    (uint64_t)425970950490510U, (uint64_t)1037622011013183U, (uint64_t)1026882082708180U,
-    (uint64_t)1635699409504916U, (uint64_t)1644776942870488U, (uint64_t)2151820331175914U,
-    (uint64_t)824120674069819U, (uint64_t)835744976610113U, (uint64_t)1991271032313190U,
-    (uint64_t)96507354724855U, (uint64_t)400645405133260U, (uint64_t)343728076650825U,
-    (uint64_t)1151585441385566U, (uint64_t)1403339955333520U, (uint64_t)230186314139774U,
-    (uint64_t)1736248861506714U, (uint64_t)1010804378904572U, (uint64_t)1394932289845636U,
-    (uint64_t)1901351256960852U, (uint64_t)2187471430089807U, (uint64_t)1003853262342670U,
-    (uint64_t)1327743396767461U, (uint64_t)1465160415991740U, (uint64_t)366625359144534U,
-    (uint64_t)1534791405247604U, (uint64_t)1790905930250187U, (uint64_t)1255484115292738U,
-    (uint64_t)2223291365520443U, (uint64_t)210967717407408U, (uint64_t)26722916813442U,
-    (uint64_t)1919574361907910U, (uint64_t)468825088280256U, (uint64_t)2230011775946070U,
-    (uint64_t)1628365642214479U, (uint64_t)568871869234932U, (uint64_t)1066987968780488U,
-    (uint64_t)1692242903745558U, (uint64_t)1678903997328589U, (uint64_t)214262165888021U,
-    (uint64_t)1929686748607204U, (uint64_t)1790138967989670U, (uint64_t)1790261616022076U,
-    (uint64_t)1559824537553112U, (uint64_t)1230364591311358U, (uint64_t)147531939886346U,
-    (uint64_t)1528207085815487U, (uint64_t)477957922927292U, (uint64_t)285670243881618U,
-    (uint64_t)264430080123332U, (uint64_t)1163108160028611U, (uint64_t)373201522147371U,
-    (uint64_t)34903775270979U, (uint64_t)1750870048600662U, (uint64_t)1319328308741084U,
-    (uint64_t)1547548634278984U, (uint64_t)1691259592202927U, (uint64_t)2247758037259814U,
-    (uint64_t)329611399953677U, (uint64_t)1385555496268877U, (uint64_t)2242438354031066U,
-    (uint64_t)1329523854843632U, (uint64_t)399895373846055U, (uint64_t)678005703193452U,
-    (uint64_t)1496357700997771U, (uint64_t)71909969781942U, (uint64_t)1515391418612349U,
-    (uint64_t)470110837888178U, (uint64_t)1981307309417466U, (uint64_t)1259888737412276U,
-    (uint64_t)669991710228712U, (uint64_t)1048546834514303U, (uint64_t)1678323291295512U,
-    (uint64_t)2172033978088071U, (uint64_t)1529278455500556U, (uint64_t)901984601941894U,
-    (uint64_t)780867622403807U, (uint64_t)550105677282793U, (uint64_t)975860231176136U,
-    (uint64_t)525188281689178U, (uint64_t)49966114807992U, (uint64_t)1776449263836645U,
-    (uint64_t)267851776380338U, (uint64_t)2225969494054620U, (uint64_t)2016794225789822U,
-    (uint64_t)1186108678266608U, (uint64_t)1023083271408882U, (uint64_t)1119289418565906U,
-    (uint64_t)1248185897348801U, (uint64_t)1846081539082697U, (uint64_t)23756429626075U,
-    (uint64_t)1441999021105403U, (uint64_t)724497586552825U, (uint64_t)1287761623605379U,
-    (uint64_t)685303359654224U, (uint64_t)2217156930690570U, (uint64_t)163769288918347U,
-    (uint64_t)1098423278284094U, (uint64_t)1391470723006008U, (uint64_t)570700152353516U,
-    (uint64_t)744804507262556U, (uint64_t)2200464788609495U, (uint64_t)624141899161992U,
-    (uint64_t)2249570166275684U, (uint64_t)378706441983561U, (uint64_t)122486379999375U,
-    (uint64_t)430741162798924U, (uint64_t)113847463452574U, (uint64_t)266250457840685U,
-    (uint64_t)2120743625072743U, (uint64_t)222186221043927U, (uint64_t)1964290018305582U,
-    (uint64_t)1435278008132477U, (uint64_t)1670867456663734U, (uint64_t)2009989552599079U,
-    (uint64_t)1348024113448744U, (uint64_t)1158423886300455U, (uint64_t)1356467152691569U,
-    (uint64_t)306943042363674U, (uint64_t)926879628664255U, (uint64_t)1349295689598324U,
-    (uint64_t)725558330071205U, (uint64_t)536569987519948U, (uint64_t)116436990335366U,
-    (uint64_t)1551888573800376U, (uint64_t)2044698345945451U, (uint64_t)104279940291311U,
-    (uint64_t)251526570943220U, (uint64_t)754735828122925U, (uint64_t)33448073576361U,
-    (uint64_t)994605876754543U, (uint64_t)546007584022006U, (uint64_t)2217332798409487U,
-    (uint64_t)706477052561591U, (uint64_t)131174619428653U, (uint64_t)2148698284087243U,
-    (uint64_t)239290486205186U, (uint64_t)2161325796952184U, (uint64_t)1713452845607994U,
-    (uint64_t)1297861562938913U, (uint64_t)1779539876828514U, (uint64_t)1926559018603871U,
-    (uint64_t)296485747893968U, (uint64_t)1859208206640686U, (uint64_t)538513979002718U,
-    (uint64_t)103998826506137U, (uint64_t)2025375396538469U, (uint64_t)1370680785701206U,
-    (uint64_t)1698557311253840U, (uint64_t)1411096399076595U, (uint64_t)2132580530813677U,
-    (uint64_t)2071564345845035U, (uint64_t)498581428556735U, (uint64_t)1136010486691371U,
-    (uint64_t)1927619356993146U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 13559344787725ULL, 2051621493703448ULL, 1947659315640708ULL,
+    626856790370168ULL, 1592804284034836ULL, 1781728767459187ULL, 278818420518009ULL,
+    2038030359908351ULL, 910625973862690ULL, 471887343142239ULL, 1298543306606048ULL,
+    794147365642417ULL, 129968992326749ULL, 523140861678572ULL, 1166419653909231ULL,
+    2009637196928390ULL, 1288020222395193ULL, 1007046974985829ULL, 208981102651386ULL,
+    2074009315253380ULL, 1564056062071967ULL, 276822668750618ULL, 206621292512572ULL,
+    470304361809269ULL, 895215438398493ULL, 1527859053868686ULL, 1624967223409369ULL,
+    811821865979736ULL, 350450534838340ULL, 219143807921807ULL, 507994540371254ULL,
+    986513794574720ULL, 1142661369967121ULL, 621278293399257ULL, 556189161519781ULL,
+    351964007865066ULL, 2011573453777822ULL, 1367125527151537ULL, 1691316722438196ULL,
+    731328817345164ULL, 1284781192709232ULL, 478439299539269ULL, 204842178076429ULL,
+    2085125369913651ULL, 1980773492792985ULL, 1480264409524940ULL, 688389585376233ULL,
+    612962643526972ULL, 165595382536676ULL, 1850300069212263ULL, 1176357203491551ULL,
+    1880164984292321ULL, 10786153104736ULL, 1242293560510203ULL, 1358399951884084ULL,
+    1901358796610357ULL, 1385092558795806ULL, 1734893785311348ULL, 2046201851951191ULL,
+    1233811309557352ULL, 1531160168656129ULL, 1543287181303358ULL, 516121446374119ULL,
+    723422668089935ULL, 1228176774959679ULL, 1598014722726267ULL, 1630810326658412ULL,
+    1343833067463760ULL, 1024397964362099ULL, 1157142161346781ULL, 56422174971792ULL,
+    544901687297092ULL, 1291559028869009ULL, 1336918672345120ULL, 1390874603281353ULL,
+    1127199512010904ULL, 992644979940964ULL, 1035213479783573ULL, 36043651196100ULL,
+    1220961519321221ULL, 1348190007756977ULL, 579420200329088ULL, 1703819961008985ULL,
+    1993919213460047ULL, 2225080008232251ULL, 392785893702372ULL, 464312521482632ULL,
+    1224525362116057ULL, 810394248933036ULL, 932513521649107ULL, 592314953488703ULL,
+    586334603791548ULL, 1310888126096549ULL, 650842674074281ULL, 1596447001791059ULL,
+    2086767406328284ULL, 1866377645879940ULL, 1721604362642743ULL, 738502322566890ULL,
+    1851901097729689ULL, 1158347571686914ULL, 2023626733470827ULL, 329625404653699ULL,
+    563555875598551ULL, 516554588079177ULL, 1134688306104598ULL, 186301198420809ULL,
+    1339952213563300ULL, 643605614625891ULL, 1947505332718043ULL, 1722071694852824ULL,
+    601679570440694ULL, 1821275721236351ULL, 1808307842870389ULL, 1654165204015635ULL,
+    1457334100715245ULL, 217784948678349ULL, 1820622417674817ULL, 1946121178444661ULL,
+    597980757799332ULL, 1745271227710764ULL, 2010952890941980ULL, 339811849696648ULL,
+    1066120666993872ULL, 261276166508990ULL, 323098645774553ULL, 207454744271283ULL,
+    941448672977675ULL, 71890920544375ULL, 840849789313357ULL, 1223996070717926ULL,
+    196832550853408ULL, 115986818309231ULL, 1586171527267675ULL, 1666169080973450ULL,
+    1456454731176365ULL, 44467854369003ULL, 2149656190691480ULL, 283446383597589ULL,
+    2040542647729974ULL, 305705593840224ULL, 475315822269791ULL, 648133452550632ULL,
+    169218658835720ULL, 24960052338251ULL, 938907951346766ULL, 425970950490510ULL,
+    1037622011013183ULL, 1026882082708180ULL, 1635699409504916ULL, 1644776942870488ULL,
+    2151820331175914ULL, 824120674069819ULL, 835744976610113ULL, 1991271032313190ULL,
+    96507354724855ULL, 400645405133260ULL, 343728076650825ULL, 1151585441385566ULL,
+    1403339955333520ULL, 230186314139774ULL, 1736248861506714ULL, 1010804378904572ULL,
+    1394932289845636ULL, 1901351256960852ULL, 2187471430089807ULL, 1003853262342670ULL,
+    1327743396767461ULL, 1465160415991740ULL, 366625359144534ULL, 1534791405247604ULL,
+    1790905930250187ULL, 1255484115292738ULL, 2223291365520443ULL, 210967717407408ULL,
+    26722916813442ULL, 1919574361907910ULL, 468825088280256ULL, 2230011775946070ULL,
+    1628365642214479ULL, 568871869234932ULL, 1066987968780488ULL, 1692242903745558ULL,
+    1678903997328589ULL, 214262165888021ULL, 1929686748607204ULL, 1790138967989670ULL,
+    1790261616022076ULL, 1559824537553112ULL, 1230364591311358ULL, 147531939886346ULL,
+    1528207085815487ULL, 477957922927292ULL, 285670243881618ULL, 264430080123332ULL,
+    1163108160028611ULL, 373201522147371ULL, 34903775270979ULL, 1750870048600662ULL,
+    1319328308741084ULL, 1547548634278984ULL, 1691259592202927ULL, 2247758037259814ULL,
+    329611399953677ULL, 1385555496268877ULL, 2242438354031066ULL, 1329523854843632ULL,
+    399895373846055ULL, 678005703193452ULL, 1496357700997771ULL, 71909969781942ULL,
+    1515391418612349ULL, 470110837888178ULL, 1981307309417466ULL, 1259888737412276ULL,
+    669991710228712ULL, 1048546834514303ULL, 1678323291295512ULL, 2172033978088071ULL,
+    1529278455500556ULL, 901984601941894ULL, 780867622403807ULL, 550105677282793ULL,
+    975860231176136ULL, 525188281689178ULL, 49966114807992ULL, 1776449263836645ULL,
+    267851776380338ULL, 2225969494054620ULL, 2016794225789822ULL, 1186108678266608ULL,
+    1023083271408882ULL, 1119289418565906ULL, 1248185897348801ULL, 1846081539082697ULL,
+    23756429626075ULL, 1441999021105403ULL, 724497586552825ULL, 1287761623605379ULL,
+    685303359654224ULL, 2217156930690570ULL, 163769288918347ULL, 1098423278284094ULL,
+    1391470723006008ULL, 570700152353516ULL, 744804507262556ULL, 2200464788609495ULL,
+    624141899161992ULL, 2249570166275684ULL, 378706441983561ULL, 122486379999375ULL,
+    430741162798924ULL, 113847463452574ULL, 266250457840685ULL, 2120743625072743ULL,
+    222186221043927ULL, 1964290018305582ULL, 1435278008132477ULL, 1670867456663734ULL,
+    2009989552599079ULL, 1348024113448744ULL, 1158423886300455ULL, 1356467152691569ULL,
+    306943042363674ULL, 926879628664255ULL, 1349295689598324ULL, 725558330071205ULL,
+    536569987519948ULL, 116436990335366ULL, 1551888573800376ULL, 2044698345945451ULL,
+    104279940291311ULL, 251526570943220ULL, 754735828122925ULL, 33448073576361ULL,
+    994605876754543ULL, 546007584022006ULL, 2217332798409487ULL, 706477052561591ULL,
+    131174619428653ULL, 2148698284087243ULL, 239290486205186ULL, 2161325796952184ULL,
+    1713452845607994ULL, 1297861562938913ULL, 1779539876828514ULL, 1926559018603871ULL,
+    296485747893968ULL, 1859208206640686ULL, 538513979002718ULL, 103998826506137ULL,
+    2025375396538469ULL, 1370680785701206ULL, 1698557311253840ULL, 1411096399076595ULL,
+    2132580530813677ULL, 2071564345845035ULL, 498581428556735ULL, 1136010486691371ULL,
+    1927619356993146ULL
   };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_g_pow2_128_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)557549315715710U, (uint64_t)196756086293855U,
-    (uint64_t)846062225082495U, (uint64_t)1865068224838092U, (uint64_t)991112090754908U,
-    (uint64_t)522916421512828U, (uint64_t)2098523346722375U, (uint64_t)1135633221747012U,
-    (uint64_t)858420432114866U, (uint64_t)186358544306082U, (uint64_t)1044420411868480U,
-    (uint64_t)2080052304349321U, (uint64_t)557301814716724U, (uint64_t)1305130257814057U,
-    (uint64_t)2126012765451197U, (uint64_t)1441004402875101U, (uint64_t)353948968859203U,
-    (uint64_t)470765987164835U, (uint64_t)1507675957683570U, (uint64_t)1086650358745097U,
-    (uint64_t)1911913434398388U, (uint64_t)66086091117182U, (uint64_t)1137511952425971U,
-    (uint64_t)36958263512141U, (uint64_t)2193310025325256U, (uint64_t)1085191426269045U,
-    (uint64_t)1232148267909446U, (uint64_t)1449894406170117U, (uint64_t)1241416717139557U,
-    (uint64_t)1940876999212868U, (uint64_t)829758415918121U, (uint64_t)309608450373449U,
-    (uint64_t)2228398547683851U, (uint64_t)1580623271960188U, (uint64_t)1675601502456740U,
-    (uint64_t)1360363115493548U, (uint64_t)1098397313096815U, (uint64_t)1809255384359797U,
-    (uint64_t)1458261916834384U, (uint64_t)210682545649705U, (uint64_t)1606836641068115U,
-    (uint64_t)1230478270405318U, (uint64_t)1843192771547802U, (uint64_t)1794596343564051U,
-    (uint64_t)229060710252162U, (uint64_t)2169742775467181U, (uint64_t)701467067318072U,
-    (uint64_t)696018499035555U, (uint64_t)521051885339807U, (uint64_t)158329567901874U,
-    (uint64_t)740426481832143U, (uint64_t)1369811177301441U, (uint64_t)503351589084015U,
-    (uint64_t)1781114827942261U, (uint64_t)1650493549693035U, (uint64_t)2174562418345156U,
-    (uint64_t)456517194809244U, (uint64_t)2052761522121179U, (uint64_t)2233342271123682U,
-    (uint64_t)1445872925177435U, (uint64_t)1131882576902813U, (uint64_t)220765848055241U,
-    (uint64_t)1280259961403769U, (uint64_t)1581497080160712U, (uint64_t)1477441080108824U,
-    (uint64_t)218428165202767U, (uint64_t)1970598141278907U, (uint64_t)643366736173069U,
-    (uint64_t)2167909426804014U, (uint64_t)834993711408259U, (uint64_t)1922437166463212U,
-    (uint64_t)1900036281472252U, (uint64_t)513794844386304U, (uint64_t)1297904164900114U,
-    (uint64_t)1147626295373268U, (uint64_t)1910101606251299U, (uint64_t)182933838633381U,
-    (uint64_t)806229530787362U, (uint64_t)155511666433200U, (uint64_t)290522463375462U,
-    (uint64_t)534373523491751U, (uint64_t)1302938814480515U, (uint64_t)1664979184120445U,
-    (uint64_t)304235649499423U, (uint64_t)339284524318609U, (uint64_t)1881717946973483U,
-    (uint64_t)1670802286833842U, (uint64_t)2223637120675737U, (uint64_t)135818919485814U,
-    (uint64_t)1144856572842792U, (uint64_t)2234981613434386U, (uint64_t)963917024969826U,
-    (uint64_t)402275378284993U, (uint64_t)141532417412170U, (uint64_t)921537468739387U,
-    (uint64_t)963905069722607U, (uint64_t)1405442890733358U, (uint64_t)1567763927164655U,
-    (uint64_t)1664776329195930U, (uint64_t)2095924165508507U, (uint64_t)994243110271379U,
-    (uint64_t)1243925610609353U, (uint64_t)1029845815569727U, (uint64_t)1001968867985629U,
-    (uint64_t)170368934002484U, (uint64_t)1100906131583801U, (uint64_t)1825190326449569U,
-    (uint64_t)1462285121182096U, (uint64_t)1545240767016377U, (uint64_t)797859025652273U,
-    (uint64_t)1062758326657530U, (uint64_t)1125600735118266U, (uint64_t)739325756774527U,
-    (uint64_t)1420144485966996U, (uint64_t)1915492743426702U, (uint64_t)752968196344993U,
-    (uint64_t)882156396938351U, (uint64_t)1909097048763227U, (uint64_t)849058590685611U,
-    (uint64_t)840754951388500U, (uint64_t)1832926948808323U, (uint64_t)2023317100075297U,
-    (uint64_t)322382745442827U, (uint64_t)1569741341737601U, (uint64_t)1678986113194987U,
-    (uint64_t)757598994581938U, (uint64_t)29678659580705U, (uint64_t)1239680935977986U,
-    (uint64_t)1509239427168474U, (uint64_t)1055981929287006U, (uint64_t)1894085471158693U,
-    (uint64_t)916486225488490U, (uint64_t)642168890366120U, (uint64_t)300453362620010U,
-    (uint64_t)1858797242721481U, (uint64_t)2077989823177130U, (uint64_t)510228455273334U,
-    (uint64_t)1473284798689270U, (uint64_t)5173934574301U, (uint64_t)765285232030050U,
-    (uint64_t)1007154707631065U, (uint64_t)1862128712885972U, (uint64_t)168873464821340U,
-    (uint64_t)1967853269759318U, (uint64_t)1489896018263031U, (uint64_t)592451806166369U,
-    (uint64_t)1242298565603883U, (uint64_t)1838918921339058U, (uint64_t)697532763910695U,
-    (uint64_t)294335466239059U, (uint64_t)135687058387449U, (uint64_t)2133734403874176U,
-    (uint64_t)2121911143127699U, (uint64_t)20222476737364U, (uint64_t)1200824626476747U,
-    (uint64_t)1397731736540791U, (uint64_t)702378430231418U, (uint64_t)59059527640068U,
-    (uint64_t)460992547183981U, (uint64_t)1016125857842765U, (uint64_t)1273530839608957U,
-    (uint64_t)96724128829301U, (uint64_t)1313433042425233U, (uint64_t)3543822857227U,
-    (uint64_t)761975685357118U, (uint64_t)110417360745248U, (uint64_t)1079634164577663U,
-    (uint64_t)2044574510020457U, (uint64_t)338709058603120U, (uint64_t)94541336042799U,
-    (uint64_t)127963233585039U, (uint64_t)94427896272258U, (uint64_t)1143501979342182U,
-    (uint64_t)1217958006212230U, (uint64_t)2153887831492134U, (uint64_t)1519219513255575U,
-    (uint64_t)251793195454181U, (uint64_t)392517349345200U, (uint64_t)1507033011868881U,
-    (uint64_t)2208494254670752U, (uint64_t)1364389582694359U, (uint64_t)2214069430728063U,
-    (uint64_t)1272814257105752U, (uint64_t)741450148906352U, (uint64_t)1105776675555685U,
-    (uint64_t)824447222014984U, (uint64_t)528745219306376U, (uint64_t)589427609121575U,
-    (uint64_t)1501786838809155U, (uint64_t)379067373073147U, (uint64_t)184909476589356U,
-    (uint64_t)1346887560616185U, (uint64_t)1932023742314082U, (uint64_t)1633302311869264U,
-    (uint64_t)1685314821133069U, (uint64_t)1836610282047884U, (uint64_t)1595571594397150U,
-    (uint64_t)615441688872198U, (uint64_t)1926435616702564U, (uint64_t)235632180396480U,
-    (uint64_t)1051918343571810U, (uint64_t)2150570051687050U, (uint64_t)879198845408738U,
-    (uint64_t)1443966275205464U, (uint64_t)481362545245088U, (uint64_t)512807443532642U,
-    (uint64_t)641147578283480U, (uint64_t)1594276116945596U, (uint64_t)1844812743300602U,
-    (uint64_t)2044559316019485U, (uint64_t)202620777969020U, (uint64_t)852992984136302U,
-    (uint64_t)1500869642692910U, (uint64_t)1085216217052457U, (uint64_t)1736294372259758U,
-    (uint64_t)2009666354486552U, (uint64_t)1262389020715248U, (uint64_t)1166527705256867U,
-    (uint64_t)1409917450806036U, (uint64_t)1705819160057637U, (uint64_t)1116901782584378U,
-    (uint64_t)1278460472285473U, (uint64_t)257879811360157U, (uint64_t)40314007176886U,
-    (uint64_t)701309846749639U, (uint64_t)1380457676672777U, (uint64_t)631519782380272U,
-    (uint64_t)1196339573466793U, (uint64_t)955537708940017U, (uint64_t)532725633381530U,
-    (uint64_t)641190593731833U, (uint64_t)7214357153807U, (uint64_t)481922072107983U,
-    (uint64_t)1634886189207352U, (uint64_t)1247659758261633U, (uint64_t)1655809614786430U,
-    (uint64_t)43105797900223U, (uint64_t)76205809912607U, (uint64_t)1936575107455823U,
-    (uint64_t)1107927314642236U, (uint64_t)2199986333469333U, (uint64_t)802974829322510U,
-    (uint64_t)718173128143482U, (uint64_t)539385184235615U, (uint64_t)2075693785611221U,
-    (uint64_t)953281147333690U, (uint64_t)1623571637172587U, (uint64_t)655274535022250U,
-    (uint64_t)1568078078819021U, (uint64_t)101142125049712U, (uint64_t)1488441673350881U,
-    (uint64_t)1457969561944515U, (uint64_t)1492622544287712U, (uint64_t)2041460689280803U,
-    (uint64_t)1961848091392887U, (uint64_t)461003520846938U, (uint64_t)934728060399807U,
-    (uint64_t)117723291519705U, (uint64_t)1027773762863526U, (uint64_t)56765304991567U,
-    (uint64_t)2184028379550479U, (uint64_t)1768767711894030U, (uint64_t)1304432068983172U,
-    (uint64_t)498080974452325U, (uint64_t)2134905654858163U, (uint64_t)1446137427202647U,
-    (uint64_t)551613831549590U, (uint64_t)680288767054205U, (uint64_t)1278113339140386U,
-    (uint64_t)378149431842614U, (uint64_t)80520494426960U, (uint64_t)2080985256348782U,
-    (uint64_t)673432591799820U, (uint64_t)739189463724560U, (uint64_t)1847191452197509U,
-    (uint64_t)527737312871602U, (uint64_t)477609358840073U, (uint64_t)1891633072677946U,
-    (uint64_t)1841456828278466U, (uint64_t)2242502936489002U, (uint64_t)524791829362709U,
-    (uint64_t)276648168514036U, (uint64_t)991706903257619U, (uint64_t)512580228297906U,
-    (uint64_t)1216855104975946U, (uint64_t)67030930303149U, (uint64_t)769593945208213U,
-    (uint64_t)2048873385103577U, (uint64_t)455635274123107U, (uint64_t)2077404927176696U,
-    (uint64_t)1803539634652306U, (uint64_t)1837579953843417U, (uint64_t)1564240068662828U,
-    (uint64_t)1964310918970435U, (uint64_t)832822906252492U, (uint64_t)1516044634195010U,
-    (uint64_t)770571447506889U, (uint64_t)602215152486818U, (uint64_t)1760828333136947U,
-    (uint64_t)730156776030376U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 557549315715710ULL, 196756086293855ULL, 846062225082495ULL,
+    1865068224838092ULL, 991112090754908ULL, 522916421512828ULL, 2098523346722375ULL,
+    1135633221747012ULL, 858420432114866ULL, 186358544306082ULL, 1044420411868480ULL,
+    2080052304349321ULL, 557301814716724ULL, 1305130257814057ULL, 2126012765451197ULL,
+    1441004402875101ULL, 353948968859203ULL, 470765987164835ULL, 1507675957683570ULL,
+    1086650358745097ULL, 1911913434398388ULL, 66086091117182ULL, 1137511952425971ULL,
+    36958263512141ULL, 2193310025325256ULL, 1085191426269045ULL, 1232148267909446ULL,
+    1449894406170117ULL, 1241416717139557ULL, 1940876999212868ULL, 829758415918121ULL,
+    309608450373449ULL, 2228398547683851ULL, 1580623271960188ULL, 1675601502456740ULL,
+    1360363115493548ULL, 1098397313096815ULL, 1809255384359797ULL, 1458261916834384ULL,
+    210682545649705ULL, 1606836641068115ULL, 1230478270405318ULL, 1843192771547802ULL,
+    1794596343564051ULL, 229060710252162ULL, 2169742775467181ULL, 701467067318072ULL,
+    696018499035555ULL, 521051885339807ULL, 158329567901874ULL, 740426481832143ULL,
+    1369811177301441ULL, 503351589084015ULL, 1781114827942261ULL, 1650493549693035ULL,
+    2174562418345156ULL, 456517194809244ULL, 2052761522121179ULL, 2233342271123682ULL,
+    1445872925177435ULL, 1131882576902813ULL, 220765848055241ULL, 1280259961403769ULL,
+    1581497080160712ULL, 1477441080108824ULL, 218428165202767ULL, 1970598141278907ULL,
+    643366736173069ULL, 2167909426804014ULL, 834993711408259ULL, 1922437166463212ULL,
+    1900036281472252ULL, 513794844386304ULL, 1297904164900114ULL, 1147626295373268ULL,
+    1910101606251299ULL, 182933838633381ULL, 806229530787362ULL, 155511666433200ULL,
+    290522463375462ULL, 534373523491751ULL, 1302938814480515ULL, 1664979184120445ULL,
+    304235649499423ULL, 339284524318609ULL, 1881717946973483ULL, 1670802286833842ULL,
+    2223637120675737ULL, 135818919485814ULL, 1144856572842792ULL, 2234981613434386ULL,
+    963917024969826ULL, 402275378284993ULL, 141532417412170ULL, 921537468739387ULL,
+    963905069722607ULL, 1405442890733358ULL, 1567763927164655ULL, 1664776329195930ULL,
+    2095924165508507ULL, 994243110271379ULL, 1243925610609353ULL, 1029845815569727ULL,
+    1001968867985629ULL, 170368934002484ULL, 1100906131583801ULL, 1825190326449569ULL,
+    1462285121182096ULL, 1545240767016377ULL, 797859025652273ULL, 1062758326657530ULL,
+    1125600735118266ULL, 739325756774527ULL, 1420144485966996ULL, 1915492743426702ULL,
+    752968196344993ULL, 882156396938351ULL, 1909097048763227ULL, 849058590685611ULL,
+    840754951388500ULL, 1832926948808323ULL, 2023317100075297ULL, 322382745442827ULL,
+    1569741341737601ULL, 1678986113194987ULL, 757598994581938ULL, 29678659580705ULL,
+    1239680935977986ULL, 1509239427168474ULL, 1055981929287006ULL, 1894085471158693ULL,
+    916486225488490ULL, 642168890366120ULL, 300453362620010ULL, 1858797242721481ULL,
+    2077989823177130ULL, 510228455273334ULL, 1473284798689270ULL, 5173934574301ULL,
+    765285232030050ULL, 1007154707631065ULL, 1862128712885972ULL, 168873464821340ULL,
+    1967853269759318ULL, 1489896018263031ULL, 592451806166369ULL, 1242298565603883ULL,
+    1838918921339058ULL, 697532763910695ULL, 294335466239059ULL, 135687058387449ULL,
+    2133734403874176ULL, 2121911143127699ULL, 20222476737364ULL, 1200824626476747ULL,
+    1397731736540791ULL, 702378430231418ULL, 59059527640068ULL, 460992547183981ULL,
+    1016125857842765ULL, 1273530839608957ULL, 96724128829301ULL, 1313433042425233ULL,
+    3543822857227ULL, 761975685357118ULL, 110417360745248ULL, 1079634164577663ULL,
+    2044574510020457ULL, 338709058603120ULL, 94541336042799ULL, 127963233585039ULL,
+    94427896272258ULL, 1143501979342182ULL, 1217958006212230ULL, 2153887831492134ULL,
+    1519219513255575ULL, 251793195454181ULL, 392517349345200ULL, 1507033011868881ULL,
+    2208494254670752ULL, 1364389582694359ULL, 2214069430728063ULL, 1272814257105752ULL,
+    741450148906352ULL, 1105776675555685ULL, 824447222014984ULL, 528745219306376ULL,
+    589427609121575ULL, 1501786838809155ULL, 379067373073147ULL, 184909476589356ULL,
+    1346887560616185ULL, 1932023742314082ULL, 1633302311869264ULL, 1685314821133069ULL,
+    1836610282047884ULL, 1595571594397150ULL, 615441688872198ULL, 1926435616702564ULL,
+    235632180396480ULL, 1051918343571810ULL, 2150570051687050ULL, 879198845408738ULL,
+    1443966275205464ULL, 481362545245088ULL, 512807443532642ULL, 641147578283480ULL,
+    1594276116945596ULL, 1844812743300602ULL, 2044559316019485ULL, 202620777969020ULL,
+    852992984136302ULL, 1500869642692910ULL, 1085216217052457ULL, 1736294372259758ULL,
+    2009666354486552ULL, 1262389020715248ULL, 1166527705256867ULL, 1409917450806036ULL,
+    1705819160057637ULL, 1116901782584378ULL, 1278460472285473ULL, 257879811360157ULL,
+    40314007176886ULL, 701309846749639ULL, 1380457676672777ULL, 631519782380272ULL,
+    1196339573466793ULL, 955537708940017ULL, 532725633381530ULL, 641190593731833ULL,
+    7214357153807ULL, 481922072107983ULL, 1634886189207352ULL, 1247659758261633ULL,
+    1655809614786430ULL, 43105797900223ULL, 76205809912607ULL, 1936575107455823ULL,
+    1107927314642236ULL, 2199986333469333ULL, 802974829322510ULL, 718173128143482ULL,
+    539385184235615ULL, 2075693785611221ULL, 953281147333690ULL, 1623571637172587ULL,
+    655274535022250ULL, 1568078078819021ULL, 101142125049712ULL, 1488441673350881ULL,
+    1457969561944515ULL, 1492622544287712ULL, 2041460689280803ULL, 1961848091392887ULL,
+    461003520846938ULL, 934728060399807ULL, 117723291519705ULL, 1027773762863526ULL,
+    56765304991567ULL, 2184028379550479ULL, 1768767711894030ULL, 1304432068983172ULL,
+    498080974452325ULL, 2134905654858163ULL, 1446137427202647ULL, 551613831549590ULL,
+    680288767054205ULL, 1278113339140386ULL, 378149431842614ULL, 80520494426960ULL,
+    2080985256348782ULL, 673432591799820ULL, 739189463724560ULL, 1847191452197509ULL,
+    527737312871602ULL, 477609358840073ULL, 1891633072677946ULL, 1841456828278466ULL,
+    2242502936489002ULL, 524791829362709ULL, 276648168514036ULL, 991706903257619ULL,
+    512580228297906ULL, 1216855104975946ULL, 67030930303149ULL, 769593945208213ULL,
+    2048873385103577ULL, 455635274123107ULL, 2077404927176696ULL, 1803539634652306ULL,
+    1837579953843417ULL, 1564240068662828ULL, 1964310918970435ULL, 832822906252492ULL,
+    1516044634195010ULL, 770571447506889ULL, 602215152486818ULL, 1760828333136947ULL,
+    730156776030376ULL
   };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_g_pow2_192_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)1129953239743101U, (uint64_t)1240339163956160U,
-    (uint64_t)61002583352401U, (uint64_t)2017604552196030U, (uint64_t)1576867829229863U,
-    (uint64_t)1508654942849389U, (uint64_t)270111619664077U, (uint64_t)1253097517254054U,
-    (uint64_t)721798270973250U, (uint64_t)161923365415298U, (uint64_t)828530877526011U,
-    (uint64_t)1494851059386763U, (uint64_t)662034171193976U, (uint64_t)1315349646974670U,
-    (uint64_t)2199229517308806U, (uint64_t)497078277852673U, (uint64_t)1310507715989956U,
-    (uint64_t)1881315714002105U, (uint64_t)2214039404983803U, (uint64_t)1331036420272667U,
-    (uint64_t)296286697520787U, (uint64_t)1179367922639127U, (uint64_t)25348441419697U,
-    (uint64_t)2200984961703188U, (uint64_t)150893128908291U, (uint64_t)1978614888570852U,
-    (uint64_t)1539657347172046U, (uint64_t)553810196523619U, (uint64_t)246017573977646U,
-    (uint64_t)1440448985385485U, (uint64_t)346049108099981U, (uint64_t)601166606218546U,
-    (uint64_t)855822004151713U, (uint64_t)1957521326383188U, (uint64_t)1114240380430887U,
-    (uint64_t)1349639675122048U, (uint64_t)957375954499040U, (uint64_t)111551795360136U,
-    (uint64_t)618586733648988U, (uint64_t)490708840688866U, (uint64_t)1267002049697314U,
-    (uint64_t)1130723224930028U, (uint64_t)215603029480828U, (uint64_t)1277138555414710U,
-    (uint64_t)1556750324971322U, (uint64_t)1407903521793741U, (uint64_t)1836836546590749U,
-    (uint64_t)576500297444199U, (uint64_t)2074707599091135U, (uint64_t)1826239864380012U,
-    (uint64_t)1935365705983312U, (uint64_t)239501825683682U, (uint64_t)1594236669034980U,
-    (uint64_t)1283078975055301U, (uint64_t)856745636255925U, (uint64_t)1342128647959981U,
-    (uint64_t)945216428379689U, (uint64_t)938746202496410U, (uint64_t)105775123333919U,
-    (uint64_t)1379852610117266U, (uint64_t)1770216827500275U, (uint64_t)1016017267535704U,
-    (uint64_t)1902885522469532U, (uint64_t)994184703730489U, (uint64_t)2227487538793763U,
-    (uint64_t)53155967096055U, (uint64_t)1264120808114350U, (uint64_t)1334928769376729U,
-    (uint64_t)393911808079997U, (uint64_t)826229239481845U, (uint64_t)1827903006733192U,
-    (uint64_t)1449283706008465U, (uint64_t)1258040415217849U, (uint64_t)1641484112868370U,
-    (uint64_t)1140150841968176U, (uint64_t)391113338021313U, (uint64_t)162138667815833U,
-    (uint64_t)742204396566060U, (uint64_t)110709233440557U, (uint64_t)90179377432917U,
-    (uint64_t)530511949644489U, (uint64_t)911568635552279U, (uint64_t)135869304780166U,
-    (uint64_t)617719999563692U, (uint64_t)1802525001631319U, (uint64_t)1836394639510490U,
-    (uint64_t)1862739456475085U, (uint64_t)1378284444664288U, (uint64_t)1617882529391756U,
-    (uint64_t)876124429891172U, (uint64_t)1147654641445091U, (uint64_t)1476943370400542U,
-    (uint64_t)688601222759067U, (uint64_t)2120281968990205U, (uint64_t)1387113236912611U,
-    (uint64_t)2125245820685788U, (uint64_t)1030674016350092U, (uint64_t)1594684598654247U,
-    (uint64_t)1165939511879820U, (uint64_t)271499323244173U, (uint64_t)546587254515484U,
-    (uint64_t)945603425742936U, (uint64_t)1242252568170226U, (uint64_t)561598728058142U,
-    (uint64_t)604827091794712U, (uint64_t)19869753585186U, (uint64_t)565367744708915U,
-    (uint64_t)536755754533603U, (uint64_t)1767258313589487U, (uint64_t)907952975936127U,
-    (uint64_t)292851652613937U, (uint64_t)163573546237963U, (uint64_t)837601408384564U,
-    (uint64_t)591996990118301U, (uint64_t)2126051747693057U, (uint64_t)182247548824566U,
-    (uint64_t)908369044122868U, (uint64_t)1335442699947273U, (uint64_t)2234292296528612U,
-    (uint64_t)689537529333034U, (uint64_t)2174778663790714U, (uint64_t)1011407643592667U,
-    (uint64_t)1856130618715473U, (uint64_t)1557437221651741U, (uint64_t)2250285407006102U,
-    (uint64_t)1412384213410827U, (uint64_t)1428042038612456U, (uint64_t)962709733973660U,
-    (uint64_t)313995703125919U, (uint64_t)1844969155869325U, (uint64_t)787716782673657U,
-    (uint64_t)622504542173478U, (uint64_t)930119043384654U, (uint64_t)2128870043952488U,
-    (uint64_t)537781531479523U, (uint64_t)1556666269904940U, (uint64_t)417333635741346U,
-    (uint64_t)1986743846438415U, (uint64_t)877620478041197U, (uint64_t)2205624582983829U,
-    (uint64_t)595260668884488U, (uint64_t)2025159350373157U, (uint64_t)2091659716088235U,
-    (uint64_t)1423634716596391U, (uint64_t)653686638634080U, (uint64_t)1972388399989956U,
-    (uint64_t)795575741798014U, (uint64_t)889240107997846U, (uint64_t)1446156876910732U,
-    (uint64_t)1028507012221776U, (uint64_t)1071697574586478U, (uint64_t)1689630411899691U,
-    (uint64_t)604092816502174U, (uint64_t)1909917373896122U, (uint64_t)1602544877643837U,
-    (uint64_t)1227177032923867U, (uint64_t)62684197535630U, (uint64_t)186146290753883U,
-    (uint64_t)414449055316766U, (uint64_t)1560555880866750U, (uint64_t)157579947096755U,
-    (uint64_t)230526795502384U, (uint64_t)1197673369665894U, (uint64_t)593779215869037U,
-    (uint64_t)214638834474097U, (uint64_t)1796344443484478U, (uint64_t)493550548257317U,
-    (uint64_t)1628442824033694U, (uint64_t)1410811655893495U, (uint64_t)1009361960995171U,
-    (uint64_t)604736219740352U, (uint64_t)392445928555351U, (uint64_t)1254295770295706U,
-    (uint64_t)1958074535046128U, (uint64_t)508699942241019U, (uint64_t)739405911261325U,
-    (uint64_t)1678760393882409U, (uint64_t)517763708545996U, (uint64_t)640040257898722U,
-    (uint64_t)384966810872913U, (uint64_t)407454748380128U, (uint64_t)152604679407451U,
-    (uint64_t)185102854927662U, (uint64_t)1448175503649595U, (uint64_t)100328519208674U,
-    (uint64_t)1153263667012830U, (uint64_t)1643926437586490U, (uint64_t)609632142834154U,
-    (uint64_t)980984004749261U, (uint64_t)855290732258779U, (uint64_t)2186022163021506U,
-    (uint64_t)1254052618626070U, (uint64_t)1850030517182611U, (uint64_t)162348933090207U,
-    (uint64_t)1948712273679932U, (uint64_t)1331832516262191U, (uint64_t)1219400369175863U,
-    (uint64_t)89689036937483U, (uint64_t)1554886057235815U, (uint64_t)1520047528432789U,
-    (uint64_t)81263957652811U, (uint64_t)146612464257008U, (uint64_t)2207945627164163U,
-    (uint64_t)919846660682546U, (uint64_t)1925694087906686U, (uint64_t)2102027292388012U,
-    (uint64_t)887992003198635U, (uint64_t)1817924871537027U, (uint64_t)746660005584342U,
-    (uint64_t)753757153275525U, (uint64_t)91394270908699U, (uint64_t)511837226544151U,
-    (uint64_t)736341543649373U, (uint64_t)1256371121466367U, (uint64_t)1977778299551813U,
-    (uint64_t)817915174462263U, (uint64_t)1602323381418035U, (uint64_t)190035164572930U,
-    (uint64_t)603796401391181U, (uint64_t)2152666873671669U, (uint64_t)1813900316324112U,
-    (uint64_t)1292622433358041U, (uint64_t)888439870199892U, (uint64_t)978918155071994U,
-    (uint64_t)534184417909805U, (uint64_t)466460084317313U, (uint64_t)1275223140288685U,
-    (uint64_t)786407043883517U, (uint64_t)1620520623925754U, (uint64_t)1753625021290269U,
-    (uint64_t)751937175104525U, (uint64_t)905301961820613U, (uint64_t)697059847245437U,
-    (uint64_t)584919033981144U, (uint64_t)1272165506533156U, (uint64_t)1532180021450866U,
-    (uint64_t)1901407354005301U, (uint64_t)1421319720492586U, (uint64_t)2179081609765456U,
-    (uint64_t)2193253156667632U, (uint64_t)1080248329608584U, (uint64_t)2158422436462066U,
-    (uint64_t)759167597017850U, (uint64_t)545759071151285U, (uint64_t)641600428493698U,
-    (uint64_t)943791424499848U, (uint64_t)469571542427864U, (uint64_t)951117845222467U,
-    (uint64_t)1780538594373407U, (uint64_t)614611122040309U, (uint64_t)1354826131886963U,
-    (uint64_t)221898131992340U, (uint64_t)1145699723916219U, (uint64_t)798735379961769U,
-    (uint64_t)1843560518208287U, (uint64_t)1424523160161545U, (uint64_t)205549016574779U,
-    (uint64_t)2239491587362749U, (uint64_t)1918363582399888U, (uint64_t)1292183072788455U,
-    (uint64_t)1783513123192567U, (uint64_t)1584027954317205U, (uint64_t)1890421443925740U,
-    (uint64_t)1718459319874929U, (uint64_t)1522091040748809U, (uint64_t)399467600667219U,
-    (uint64_t)1870973059066576U, (uint64_t)287514433150348U, (uint64_t)1397845311152885U,
-    (uint64_t)1880440629872863U, (uint64_t)709302939340341U, (uint64_t)1813571361109209U,
-    (uint64_t)86598795876860U, (uint64_t)1146964554310612U, (uint64_t)1590956584862432U,
-    (uint64_t)2097004628155559U, (uint64_t)656227622102390U, (uint64_t)1808500445541891U,
-    (uint64_t)958336726523135U, (uint64_t)2007604569465975U, (uint64_t)313504950390997U,
-    (uint64_t)1399686004953620U, (uint64_t)1759732788465234U, (uint64_t)1562539721055836U,
-    (uint64_t)1575722765016293U, (uint64_t)793318366641259U, (uint64_t)443876859384887U,
-    (uint64_t)547308921989704U, (uint64_t)636698687503328U, (uint64_t)2179175835287340U,
-    (uint64_t)498333551718258U, (uint64_t)932248760026176U, (uint64_t)1612395686304653U,
-    (uint64_t)2179774103745626U, (uint64_t)1359658123541018U, (uint64_t)171488501802442U,
-    (uint64_t)1625034951791350U, (uint64_t)520196922773633U, (uint64_t)1873787546341877U,
-    (uint64_t)303457823885368U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1129953239743101ULL, 1240339163956160ULL, 61002583352401ULL,
+    2017604552196030ULL, 1576867829229863ULL, 1508654942849389ULL, 270111619664077ULL,
+    1253097517254054ULL, 721798270973250ULL, 161923365415298ULL, 828530877526011ULL,
+    1494851059386763ULL, 662034171193976ULL, 1315349646974670ULL, 2199229517308806ULL,
+    497078277852673ULL, 1310507715989956ULL, 1881315714002105ULL, 2214039404983803ULL,
+    1331036420272667ULL, 296286697520787ULL, 1179367922639127ULL, 25348441419697ULL,
+    2200984961703188ULL, 150893128908291ULL, 1978614888570852ULL, 1539657347172046ULL,
+    553810196523619ULL, 246017573977646ULL, 1440448985385485ULL, 346049108099981ULL,
+    601166606218546ULL, 855822004151713ULL, 1957521326383188ULL, 1114240380430887ULL,
+    1349639675122048ULL, 957375954499040ULL, 111551795360136ULL, 618586733648988ULL,
+    490708840688866ULL, 1267002049697314ULL, 1130723224930028ULL, 215603029480828ULL,
+    1277138555414710ULL, 1556750324971322ULL, 1407903521793741ULL, 1836836546590749ULL,
+    576500297444199ULL, 2074707599091135ULL, 1826239864380012ULL, 1935365705983312ULL,
+    239501825683682ULL, 1594236669034980ULL, 1283078975055301ULL, 856745636255925ULL,
+    1342128647959981ULL, 945216428379689ULL, 938746202496410ULL, 105775123333919ULL,
+    1379852610117266ULL, 1770216827500275ULL, 1016017267535704ULL, 1902885522469532ULL,
+    994184703730489ULL, 2227487538793763ULL, 53155967096055ULL, 1264120808114350ULL,
+    1334928769376729ULL, 393911808079997ULL, 826229239481845ULL, 1827903006733192ULL,
+    1449283706008465ULL, 1258040415217849ULL, 1641484112868370ULL, 1140150841968176ULL,
+    391113338021313ULL, 162138667815833ULL, 742204396566060ULL, 110709233440557ULL,
+    90179377432917ULL, 530511949644489ULL, 911568635552279ULL, 135869304780166ULL,
+    617719999563692ULL, 1802525001631319ULL, 1836394639510490ULL, 1862739456475085ULL,
+    1378284444664288ULL, 1617882529391756ULL, 876124429891172ULL, 1147654641445091ULL,
+    1476943370400542ULL, 688601222759067ULL, 2120281968990205ULL, 1387113236912611ULL,
+    2125245820685788ULL, 1030674016350092ULL, 1594684598654247ULL, 1165939511879820ULL,
+    271499323244173ULL, 546587254515484ULL, 945603425742936ULL, 1242252568170226ULL,
+    561598728058142ULL, 604827091794712ULL, 19869753585186ULL, 565367744708915ULL,
+    536755754533603ULL, 1767258313589487ULL, 907952975936127ULL, 292851652613937ULL,
+    163573546237963ULL, 837601408384564ULL, 591996990118301ULL, 2126051747693057ULL,
+    182247548824566ULL, 908369044122868ULL, 1335442699947273ULL, 2234292296528612ULL,
+    689537529333034ULL, 2174778663790714ULL, 1011407643592667ULL, 1856130618715473ULL,
+    1557437221651741ULL, 2250285407006102ULL, 1412384213410827ULL, 1428042038612456ULL,
+    962709733973660ULL, 313995703125919ULL, 1844969155869325ULL, 787716782673657ULL,
+    622504542173478ULL, 930119043384654ULL, 2128870043952488ULL, 537781531479523ULL,
+    1556666269904940ULL, 417333635741346ULL, 1986743846438415ULL, 877620478041197ULL,
+    2205624582983829ULL, 595260668884488ULL, 2025159350373157ULL, 2091659716088235ULL,
+    1423634716596391ULL, 653686638634080ULL, 1972388399989956ULL, 795575741798014ULL,
+    889240107997846ULL, 1446156876910732ULL, 1028507012221776ULL, 1071697574586478ULL,
+    1689630411899691ULL, 604092816502174ULL, 1909917373896122ULL, 1602544877643837ULL,
+    1227177032923867ULL, 62684197535630ULL, 186146290753883ULL, 414449055316766ULL,
+    1560555880866750ULL, 157579947096755ULL, 230526795502384ULL, 1197673369665894ULL,
+    593779215869037ULL, 214638834474097ULL, 1796344443484478ULL, 493550548257317ULL,
+    1628442824033694ULL, 1410811655893495ULL, 1009361960995171ULL, 604736219740352ULL,
+    392445928555351ULL, 1254295770295706ULL, 1958074535046128ULL, 508699942241019ULL,
+    739405911261325ULL, 1678760393882409ULL, 517763708545996ULL, 640040257898722ULL,
+    384966810872913ULL, 407454748380128ULL, 152604679407451ULL, 185102854927662ULL,
+    1448175503649595ULL, 100328519208674ULL, 1153263667012830ULL, 1643926437586490ULL,
+    609632142834154ULL, 980984004749261ULL, 855290732258779ULL, 2186022163021506ULL,
+    1254052618626070ULL, 1850030517182611ULL, 162348933090207ULL, 1948712273679932ULL,
+    1331832516262191ULL, 1219400369175863ULL, 89689036937483ULL, 1554886057235815ULL,
+    1520047528432789ULL, 81263957652811ULL, 146612464257008ULL, 2207945627164163ULL,
+    919846660682546ULL, 1925694087906686ULL, 2102027292388012ULL, 887992003198635ULL,
+    1817924871537027ULL, 746660005584342ULL, 753757153275525ULL, 91394270908699ULL,
+    511837226544151ULL, 736341543649373ULL, 1256371121466367ULL, 1977778299551813ULL,
+    817915174462263ULL, 1602323381418035ULL, 190035164572930ULL, 603796401391181ULL,
+    2152666873671669ULL, 1813900316324112ULL, 1292622433358041ULL, 888439870199892ULL,
+    978918155071994ULL, 534184417909805ULL, 466460084317313ULL, 1275223140288685ULL,
+    786407043883517ULL, 1620520623925754ULL, 1753625021290269ULL, 751937175104525ULL,
+    905301961820613ULL, 697059847245437ULL, 584919033981144ULL, 1272165506533156ULL,
+    1532180021450866ULL, 1901407354005301ULL, 1421319720492586ULL, 2179081609765456ULL,
+    2193253156667632ULL, 1080248329608584ULL, 2158422436462066ULL, 759167597017850ULL,
+    545759071151285ULL, 641600428493698ULL, 943791424499848ULL, 469571542427864ULL,
+    951117845222467ULL, 1780538594373407ULL, 614611122040309ULL, 1354826131886963ULL,
+    221898131992340ULL, 1145699723916219ULL, 798735379961769ULL, 1843560518208287ULL,
+    1424523160161545ULL, 205549016574779ULL, 2239491587362749ULL, 1918363582399888ULL,
+    1292183072788455ULL, 1783513123192567ULL, 1584027954317205ULL, 1890421443925740ULL,
+    1718459319874929ULL, 1522091040748809ULL, 399467600667219ULL, 1870973059066576ULL,
+    287514433150348ULL, 1397845311152885ULL, 1880440629872863ULL, 709302939340341ULL,
+    1813571361109209ULL, 86598795876860ULL, 1146964554310612ULL, 1590956584862432ULL,
+    2097004628155559ULL, 656227622102390ULL, 1808500445541891ULL, 958336726523135ULL,
+    2007604569465975ULL, 313504950390997ULL, 1399686004953620ULL, 1759732788465234ULL,
+    1562539721055836ULL, 1575722765016293ULL, 793318366641259ULL, 443876859384887ULL,
+    547308921989704ULL, 636698687503328ULL, 2179175835287340ULL, 498333551718258ULL,
+    932248760026176ULL, 1612395686304653ULL, 2179774103745626ULL, 1359658123541018ULL,
+    171488501802442ULL, 1625034951791350ULL, 520196922773633ULL, 1873787546341877ULL,
+    303457823885368ULL
   };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5[640U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)1738742601995546U, (uint64_t)1146398526822698U,
-    (uint64_t)2070867633025821U, (uint64_t)562264141797630U, (uint64_t)587772402128613U,
-    (uint64_t)1801439850948184U, (uint64_t)1351079888211148U, (uint64_t)450359962737049U,
-    (uint64_t)900719925474099U, (uint64_t)1801439850948198U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1841354044333475U,
-    (uint64_t)16398895984059U, (uint64_t)755974180946558U, (uint64_t)900171276175154U,
-    (uint64_t)1821297809914039U, (uint64_t)1661154287933054U, (uint64_t)284530020860578U,
-    (uint64_t)1390261174866914U, (uint64_t)1524110943907984U, (uint64_t)1045603498418422U,
-    (uint64_t)928651508580478U, (uint64_t)1383326941296346U, (uint64_t)961937908925785U,
-    (uint64_t)80455759693706U, (uint64_t)904734540352947U, (uint64_t)1507481815385608U,
-    (uint64_t)2223447444246085U, (uint64_t)1083941587175919U, (uint64_t)2059929906842505U,
-    (uint64_t)1581435440146976U, (uint64_t)782730187692425U, (uint64_t)9928394897574U,
-    (uint64_t)1539449519985236U, (uint64_t)1923587931078510U, (uint64_t)552919286076056U,
-    (uint64_t)376925408065760U, (uint64_t)447320488831784U, (uint64_t)1362918338468019U,
-    (uint64_t)1470031896696846U, (uint64_t)2189796996539902U, (uint64_t)1337552949959847U,
-    (uint64_t)1762287177775726U, (uint64_t)237994495816815U, (uint64_t)1277840395970544U,
-    (uint64_t)543972849007241U, (uint64_t)1224692671618814U, (uint64_t)162359533289271U,
-    (uint64_t)282240927125249U, (uint64_t)586909166382289U, (uint64_t)17726488197838U,
-    (uint64_t)377014554985659U, (uint64_t)1433835303052512U, (uint64_t)702061469493692U,
-    (uint64_t)1142253108318154U, (uint64_t)318297794307551U, (uint64_t)954362646308543U,
-    (uint64_t)517363881452320U, (uint64_t)1868013482130416U, (uint64_t)262562472373260U,
-    (uint64_t)902232853249919U, (uint64_t)2107343057055746U, (uint64_t)462368348619024U,
-    (uint64_t)1893758677092974U, (uint64_t)2177729767846389U, (uint64_t)2168532543559143U,
-    (uint64_t)443867094639821U, (uint64_t)730169342581022U, (uint64_t)1564589016879755U,
-    (uint64_t)51218195700649U, (uint64_t)76684578423745U, (uint64_t)560266272480743U,
-    (uint64_t)922517457707697U, (uint64_t)2066645939860874U, (uint64_t)1318277348414638U,
-    (uint64_t)1576726809084003U, (uint64_t)1817337608563665U, (uint64_t)1874240939237666U,
-    (uint64_t)754733726333910U, (uint64_t)97085310406474U, (uint64_t)751148364309235U,
-    (uint64_t)1622159695715187U, (uint64_t)1444098819684916U, (uint64_t)130920805558089U,
-    (uint64_t)1260449179085308U, (uint64_t)1860021740768461U, (uint64_t)110052860348509U,
-    (uint64_t)193830891643810U, (uint64_t)164148413933881U, (uint64_t)180017794795332U,
-    (uint64_t)1523506525254651U, (uint64_t)465981629225956U, (uint64_t)559733514964572U,
-    (uint64_t)1279624874416974U, (uint64_t)2026642326892306U, (uint64_t)1425156829982409U,
-    (uint64_t)2160936383793147U, (uint64_t)1061870624975247U, (uint64_t)2023497043036941U,
-    (uint64_t)117942212883190U, (uint64_t)490339622800774U, (uint64_t)1729931303146295U,
-    (uint64_t)422305932971074U, (uint64_t)529103152793096U, (uint64_t)1211973233775992U,
-    (uint64_t)721364955929681U, (uint64_t)1497674430438813U, (uint64_t)342545521275073U,
-    (uint64_t)2102107575279372U, (uint64_t)2108462244669966U, (uint64_t)1382582406064082U,
-    (uint64_t)2206396818383323U, (uint64_t)2109093268641147U, (uint64_t)10809845110983U,
-    (uint64_t)1605176920880099U, (uint64_t)744640650753946U, (uint64_t)1712758897518129U,
-    (uint64_t)373410811281809U, (uint64_t)648838265800209U, (uint64_t)813058095530999U,
-    (uint64_t)513987632620169U, (uint64_t)465516160703329U, (uint64_t)2136322186126330U,
-    (uint64_t)1979645899422932U, (uint64_t)1197131006470786U, (uint64_t)1467836664863979U,
-    (uint64_t)1340751381374628U, (uint64_t)1810066212667962U, (uint64_t)1009933588225499U,
-    (uint64_t)1106129188080873U, (uint64_t)1388980405213901U, (uint64_t)533719246598044U,
-    (uint64_t)1169435803073277U, (uint64_t)198920999285821U, (uint64_t)487492330629854U,
-    (uint64_t)1807093008537778U, (uint64_t)1540899012923865U, (uint64_t)2075080271659867U,
-    (uint64_t)1527990806921523U, (uint64_t)1323728742908002U, (uint64_t)1568595959608205U,
-    (uint64_t)1388032187497212U, (uint64_t)2026968840050568U, (uint64_t)1396591153295755U,
-    (uint64_t)820416950170901U, (uint64_t)520060313205582U, (uint64_t)2016404325094901U,
-    (uint64_t)1584709677868520U, (uint64_t)272161374469956U, (uint64_t)1567188603996816U,
-    (uint64_t)1986160530078221U, (uint64_t)553930264324589U, (uint64_t)1058426729027503U,
-    (uint64_t)8762762886675U, (uint64_t)2216098143382988U, (uint64_t)1835145266889223U,
-    (uint64_t)1712936431558441U, (uint64_t)1017009937844974U, (uint64_t)585361667812740U,
-    (uint64_t)2114711541628181U, (uint64_t)2238729632971439U, (uint64_t)121257546253072U,
-    (uint64_t)847154149018345U, (uint64_t)211972965476684U, (uint64_t)287499084460129U,
-    (uint64_t)2098247259180197U, (uint64_t)839070411583329U, (uint64_t)339551619574372U,
-    (uint64_t)1432951287640743U, (uint64_t)526481249498942U, (uint64_t)931991661905195U,
-    (uint64_t)1884279965674487U, (uint64_t)200486405604411U, (uint64_t)364173020594788U,
-    (uint64_t)518034455936955U, (uint64_t)1085564703965501U, (uint64_t)16030410467927U,
-    (uint64_t)604865933167613U, (uint64_t)1695298441093964U, (uint64_t)498856548116159U,
-    (uint64_t)2193030062787034U, (uint64_t)1706339802964179U, (uint64_t)1721199073493888U,
-    (uint64_t)820740951039755U, (uint64_t)1216053436896834U, (uint64_t)23954895815139U,
-    (uint64_t)1662515208920491U, (uint64_t)1705443427511899U, (uint64_t)1957928899570365U,
-    (uint64_t)1189636258255725U, (uint64_t)1795695471103809U, (uint64_t)1691191297654118U,
-    (uint64_t)282402585374360U, (uint64_t)460405330264832U, (uint64_t)63765529445733U,
-    (uint64_t)469763447404473U, (uint64_t)733607089694996U, (uint64_t)685410420186959U,
-    (uint64_t)1096682630419738U, (uint64_t)1162548510542362U, (uint64_t)1020949526456676U,
-    (uint64_t)1211660396870573U, (uint64_t)613126398222696U, (uint64_t)1117829165843251U,
-    (uint64_t)742432540886650U, (uint64_t)1483755088010658U, (uint64_t)942392007134474U,
-    (uint64_t)1447834130944107U, (uint64_t)489368274863410U, (uint64_t)23192985544898U,
-    (uint64_t)648442406146160U, (uint64_t)785438843373876U, (uint64_t)249464684645238U,
-    (uint64_t)170494608205618U, (uint64_t)335112827260550U, (uint64_t)1462050123162735U,
-    (uint64_t)1084803668439016U, (uint64_t)853459233600325U, (uint64_t)215777728187495U,
-    (uint64_t)1965759433526974U, (uint64_t)1349482894446537U, (uint64_t)694163317612871U,
-    (uint64_t)860536766165036U, (uint64_t)1178788094084321U, (uint64_t)1652739626626996U,
-    (uint64_t)2115723946388185U, (uint64_t)1577204379094664U, (uint64_t)1083882859023240U,
-    (uint64_t)1768759143381635U, (uint64_t)1737180992507258U, (uint64_t)246054513922239U,
-    (uint64_t)577253134087234U, (uint64_t)356340280578042U, (uint64_t)1638917769925142U,
-    (uint64_t)223550348130103U, (uint64_t)470592666638765U, (uint64_t)22663573966996U,
-    (uint64_t)596552461152400U, (uint64_t)364143537069499U, (uint64_t)3942119457699U,
-    (uint64_t)107951982889287U, (uint64_t)1843471406713209U, (uint64_t)1625773041610986U,
-    (uint64_t)1466141092501702U, (uint64_t)1043024095021271U, (uint64_t)310429964047508U,
-    (uint64_t)98559121500372U, (uint64_t)152746933782868U, (uint64_t)259407205078261U,
-    (uint64_t)828123093322585U, (uint64_t)1576847274280091U, (uint64_t)1170871375757302U,
-    (uint64_t)1588856194642775U, (uint64_t)984767822341977U, (uint64_t)1141497997993760U,
-    (uint64_t)809325345150796U, (uint64_t)1879837728202511U, (uint64_t)201340910657893U,
-    (uint64_t)1079157558888483U, (uint64_t)1052373448588065U, (uint64_t)1732036202501778U,
-    (uint64_t)2105292670328445U, (uint64_t)679751387312402U, (uint64_t)1679682144926229U,
-    (uint64_t)1695823455818780U, (uint64_t)498852317075849U, (uint64_t)1786555067788433U,
-    (uint64_t)1670727545779425U, (uint64_t)117945875433544U, (uint64_t)407939139781844U,
-    (uint64_t)854632120023778U, (uint64_t)1413383148360437U, (uint64_t)286030901733673U,
-    (uint64_t)1207361858071196U, (uint64_t)461340408181417U, (uint64_t)1096919590360164U,
-    (uint64_t)1837594897475685U, (uint64_t)533755561544165U, (uint64_t)1638688042247712U,
-    (uint64_t)1431653684793005U, (uint64_t)1036458538873559U, (uint64_t)390822120341779U,
-    (uint64_t)1920929837111618U, (uint64_t)543426740024168U, (uint64_t)645751357799929U,
-    (uint64_t)2245025632994463U, (uint64_t)1550778638076452U, (uint64_t)223738153459949U,
-    (uint64_t)1337209385492033U, (uint64_t)1276967236456531U, (uint64_t)1463815821063071U,
-    (uint64_t)2070620870191473U, (uint64_t)1199170709413753U, (uint64_t)273230877394166U,
-    (uint64_t)1873264887608046U, (uint64_t)890877152910775U, (uint64_t)983226445635730U,
-    (uint64_t)44873798519521U, (uint64_t)697147127512130U, (uint64_t)961631038239304U,
-    (uint64_t)709966160696826U, (uint64_t)1706677689540366U, (uint64_t)502782733796035U,
-    (uint64_t)812545535346033U, (uint64_t)1693622521296452U, (uint64_t)1955813093002510U,
-    (uint64_t)1259937612881362U, (uint64_t)1873032503803559U, (uint64_t)1140330566016428U,
-    (uint64_t)1675726082440190U, (uint64_t)60029928909786U, (uint64_t)170335608866763U,
-    (uint64_t)766444312315022U, (uint64_t)2025049511434113U, (uint64_t)2200845622430647U,
-    (uint64_t)1201269851450408U, (uint64_t)590071752404907U, (uint64_t)1400995030286946U,
-    (uint64_t)2152637413853822U, (uint64_t)2108495473841983U, (uint64_t)3855406710349U,
-    (uint64_t)1726137673168580U, (uint64_t)51004317200100U, (uint64_t)1749082328586939U,
-    (uint64_t)1704088976144558U, (uint64_t)1977318954775118U, (uint64_t)2062602253162400U,
-    (uint64_t)948062503217479U, (uint64_t)361953965048030U, (uint64_t)1528264887238440U,
-    (uint64_t)62582552172290U, (uint64_t)2241602163389280U, (uint64_t)156385388121765U,
-    (uint64_t)2124100319761492U, (uint64_t)388928050571382U, (uint64_t)1556123596922727U,
-    (uint64_t)979310669812384U, (uint64_t)113043855206104U, (uint64_t)2023223924825469U,
-    (uint64_t)643651703263034U, (uint64_t)2234446903655540U, (uint64_t)1577241261424997U,
-    (uint64_t)860253174523845U, (uint64_t)1691026473082448U, (uint64_t)1091672764933872U,
-    (uint64_t)1957463109756365U, (uint64_t)530699502660193U, (uint64_t)349587141723569U,
-    (uint64_t)674661681919563U, (uint64_t)1633727303856240U, (uint64_t)708909037922144U,
-    (uint64_t)2160722508518119U, (uint64_t)1302188051602540U, (uint64_t)976114603845777U,
-    (uint64_t)120004758721939U, (uint64_t)1681630708873780U, (uint64_t)622274095069244U,
-    (uint64_t)1822346309016698U, (uint64_t)1100921177951904U, (uint64_t)2216952659181677U,
-    (uint64_t)1844020550362490U, (uint64_t)1976451368365774U, (uint64_t)1321101422068822U,
-    (uint64_t)1189859436282668U, (uint64_t)2008801879735257U, (uint64_t)2219413454333565U,
-    (uint64_t)424288774231098U, (uint64_t)359793146977912U, (uint64_t)270293357948703U,
-    (uint64_t)587226003677000U, (uint64_t)1482071926139945U, (uint64_t)1419630774650359U,
-    (uint64_t)1104739070570175U, (uint64_t)1662129023224130U, (uint64_t)1609203612533411U,
-    (uint64_t)1250932720691980U, (uint64_t)95215711818495U, (uint64_t)498746909028150U,
-    (uint64_t)158151296991874U, (uint64_t)1201379988527734U, (uint64_t)561599945143989U,
-    (uint64_t)2211577425617888U, (uint64_t)2166577612206324U, (uint64_t)1057590354233512U,
-    (uint64_t)1968123280416769U, (uint64_t)1316586165401313U, (uint64_t)762728164447634U,
-    (uint64_t)2045395244316047U, (uint64_t)1531796898725716U, (uint64_t)315385971670425U,
-    (uint64_t)1109421039396756U, (uint64_t)2183635256408562U, (uint64_t)1896751252659461U,
-    (uint64_t)840236037179080U, (uint64_t)796245792277211U, (uint64_t)508345890111193U,
-    (uint64_t)1275386465287222U, (uint64_t)513560822858784U, (uint64_t)1784735733120313U,
-    (uint64_t)1346467478899695U, (uint64_t)601125231208417U, (uint64_t)701076661112726U,
-    (uint64_t)1841998436455089U, (uint64_t)1156768600940434U, (uint64_t)1967853462343221U,
-    (uint64_t)2178318463061452U, (uint64_t)481885520752741U, (uint64_t)675262828640945U,
-    (uint64_t)1033539418596582U, (uint64_t)1743329872635846U, (uint64_t)159322641251283U,
-    (uint64_t)1573076470127113U, (uint64_t)954827619308195U, (uint64_t)778834750662635U,
-    (uint64_t)619912782122617U, (uint64_t)515681498488209U, (uint64_t)1675866144246843U,
-    (uint64_t)811716020969981U, (uint64_t)1125515272217398U, (uint64_t)1398917918287342U,
-    (uint64_t)1301680949183175U, (uint64_t)726474739583734U, (uint64_t)587246193475200U,
-    (uint64_t)1096581582611864U, (uint64_t)1469911826213486U, (uint64_t)1990099711206364U,
-    (uint64_t)1256496099816508U, (uint64_t)2019924615195672U, (uint64_t)1251232456707555U,
-    (uint64_t)2042971196009755U, (uint64_t)214061878479265U, (uint64_t)115385726395472U,
-    (uint64_t)1677875239524132U, (uint64_t)756888883383540U, (uint64_t)1153862117756233U,
-    (uint64_t)503391530851096U, (uint64_t)946070017477513U, (uint64_t)1878319040542579U,
-    (uint64_t)1101349418586920U, (uint64_t)793245696431613U, (uint64_t)397920495357645U,
-    (uint64_t)2174023872951112U, (uint64_t)1517867915189593U, (uint64_t)1829855041462995U,
-    (uint64_t)1046709983503619U, (uint64_t)424081940711857U, (uint64_t)2112438073094647U,
-    (uint64_t)1504338467349861U, (uint64_t)2244574127374532U, (uint64_t)2136937537441911U,
-    (uint64_t)1741150838990304U, (uint64_t)25894628400571U, (uint64_t)512213526781178U,
-    (uint64_t)1168384260796379U, (uint64_t)1424607682379833U, (uint64_t)938677789731564U,
-    (uint64_t)872882241891896U, (uint64_t)1713199397007700U, (uint64_t)1410496326218359U,
-    (uint64_t)854379752407031U, (uint64_t)465141611727634U, (uint64_t)315176937037857U,
-    (uint64_t)1020115054571233U, (uint64_t)1856290111077229U, (uint64_t)2028366269898204U,
-    (uint64_t)1432980880307543U, (uint64_t)469932710425448U, (uint64_t)581165267592247U,
-    (uint64_t)496399148156603U, (uint64_t)2063435226705903U, (uint64_t)2116841086237705U,
-    (uint64_t)498272567217048U, (uint64_t)1829438076967906U, (uint64_t)1573925801278491U,
-    (uint64_t)460763576329867U, (uint64_t)1705264723728225U, (uint64_t)999514866082412U,
-    (uint64_t)29635061779362U, (uint64_t)1884233592281020U, (uint64_t)1449755591461338U,
-    (uint64_t)42579292783222U, (uint64_t)1869504355369200U, (uint64_t)495506004805251U,
-    (uint64_t)264073104888427U, (uint64_t)2088880861028612U, (uint64_t)104646456386576U,
-    (uint64_t)1258445191399967U, (uint64_t)1348736801545799U, (uint64_t)2068276361286613U,
-    (uint64_t)884897216646374U, (uint64_t)922387476801376U, (uint64_t)1043886580402805U,
-    (uint64_t)1240883498470831U, (uint64_t)1601554651937110U, (uint64_t)804382935289482U,
-    (uint64_t)512379564477239U, (uint64_t)1466384519077032U, (uint64_t)1280698500238386U,
-    (uint64_t)211303836685749U, (uint64_t)2081725624793803U, (uint64_t)545247644516879U,
-    (uint64_t)215313359330384U, (uint64_t)286479751145614U, (uint64_t)2213650281751636U,
-    (uint64_t)2164927945999874U, (uint64_t)2072162991540882U, (uint64_t)1443769115444779U,
-    (uint64_t)1581473274363095U, (uint64_t)434633875922699U, (uint64_t)340456055781599U,
-    (uint64_t)373043091080189U, (uint64_t)839476566531776U, (uint64_t)1856706858509978U,
-    (uint64_t)931616224909153U, (uint64_t)1888181317414065U, (uint64_t)213654322650262U,
-    (uint64_t)1161078103416244U, (uint64_t)1822042328851513U, (uint64_t)915817709028812U,
-    (uint64_t)1828297056698188U, (uint64_t)1212017130909403U, (uint64_t)60258343247333U,
-    (uint64_t)342085800008230U, (uint64_t)930240559508270U, (uint64_t)1549884999174952U,
-    (uint64_t)809895264249462U, (uint64_t)184726257947682U, (uint64_t)1157065433504828U,
-    (uint64_t)1209999630381477U, (uint64_t)999920399374391U, (uint64_t)1714770150788163U,
-    (uint64_t)2026130985413228U, (uint64_t)506776632883140U, (uint64_t)1349042668246528U,
-    (uint64_t)1937232292976967U, (uint64_t)942302637530730U, (uint64_t)160211904766226U,
-    (uint64_t)1042724500438571U, (uint64_t)212454865139142U, (uint64_t)244104425172642U,
-    (uint64_t)1376990622387496U, (uint64_t)76126752421227U, (uint64_t)1027540886376422U,
-    (uint64_t)1912210655133026U, (uint64_t)13410411589575U, (uint64_t)1475856708587773U,
-    (uint64_t)615563352691682U, (uint64_t)1446629324872644U, (uint64_t)1683670301784014U,
-    (uint64_t)1049873327197127U, (uint64_t)1826401704084838U, (uint64_t)2032577048760775U,
-    (uint64_t)1922203607878853U, (uint64_t)836708788764806U, (uint64_t)2193084654695012U,
-    (uint64_t)1342923183256659U, (uint64_t)849356986294271U, (uint64_t)1228863973965618U,
-    (uint64_t)94886161081867U, (uint64_t)1423288430204892U, (uint64_t)2016167528707016U,
-    (uint64_t)1633187660972877U, (uint64_t)1550621242301752U, (uint64_t)340630244512994U,
-    (uint64_t)2103577710806901U, (uint64_t)221625016538931U, (uint64_t)421544147350960U,
-    (uint64_t)580428704555156U, (uint64_t)1479831381265617U, (uint64_t)518057926544698U,
-    (uint64_t)955027348790630U, (uint64_t)1326749172561598U, (uint64_t)1118304625755967U,
-    (uint64_t)1994005916095176U, (uint64_t)1799757332780663U, (uint64_t)751343129396941U,
-    (uint64_t)1468672898746144U, (uint64_t)1451689964451386U, (uint64_t)755070293921171U,
-    (uint64_t)904857405877052U, (uint64_t)1276087530766984U, (uint64_t)403986562858511U,
-    (uint64_t)1530661255035337U, (uint64_t)1644972908910502U, (uint64_t)1370170080438957U,
-    (uint64_t)139839536695744U, (uint64_t)909930462436512U, (uint64_t)1899999215356933U,
-    (uint64_t)635992381064566U, (uint64_t)788740975837654U, (uint64_t)224241231493695U,
-    (uint64_t)1267090030199302U, (uint64_t)998908061660139U, (uint64_t)1784537499699278U,
-    (uint64_t)859195370018706U, (uint64_t)1953966091439379U, (uint64_t)2189271820076010U,
-    (uint64_t)2039067059943978U, (uint64_t)1526694380855202U, (uint64_t)2040321513194941U,
-    (uint64_t)329922071218689U, (uint64_t)1953032256401326U, (uint64_t)989631424403521U,
-    (uint64_t)328825014934242U, (uint64_t)9407151397696U, (uint64_t)63551373671268U,
-    (uint64_t)1624728632895792U, (uint64_t)1608324920739262U, (uint64_t)1178239350351945U,
-    (uint64_t)1198077399579702U, (uint64_t)277620088676229U, (uint64_t)1775359437312528U,
-    (uint64_t)1653558177737477U, (uint64_t)1652066043408850U, (uint64_t)1063359889686622U,
-    (uint64_t)1975063804860653U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1738742601995546ULL, 1146398526822698ULL, 2070867633025821ULL,
+    562264141797630ULL, 587772402128613ULL, 1801439850948184ULL, 1351079888211148ULL,
+    450359962737049ULL, 900719925474099ULL, 1801439850948198ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    1841354044333475ULL, 16398895984059ULL, 755974180946558ULL, 900171276175154ULL,
+    1821297809914039ULL, 1661154287933054ULL, 284530020860578ULL, 1390261174866914ULL,
+    1524110943907984ULL, 1045603498418422ULL, 928651508580478ULL, 1383326941296346ULL,
+    961937908925785ULL, 80455759693706ULL, 904734540352947ULL, 1507481815385608ULL,
+    2223447444246085ULL, 1083941587175919ULL, 2059929906842505ULL, 1581435440146976ULL,
+    782730187692425ULL, 9928394897574ULL, 1539449519985236ULL, 1923587931078510ULL,
+    552919286076056ULL, 376925408065760ULL, 447320488831784ULL, 1362918338468019ULL,
+    1470031896696846ULL, 2189796996539902ULL, 1337552949959847ULL, 1762287177775726ULL,
+    237994495816815ULL, 1277840395970544ULL, 543972849007241ULL, 1224692671618814ULL,
+    162359533289271ULL, 282240927125249ULL, 586909166382289ULL, 17726488197838ULL,
+    377014554985659ULL, 1433835303052512ULL, 702061469493692ULL, 1142253108318154ULL,
+    318297794307551ULL, 954362646308543ULL, 517363881452320ULL, 1868013482130416ULL,
+    262562472373260ULL, 902232853249919ULL, 2107343057055746ULL, 462368348619024ULL,
+    1893758677092974ULL, 2177729767846389ULL, 2168532543559143ULL, 443867094639821ULL,
+    730169342581022ULL, 1564589016879755ULL, 51218195700649ULL, 76684578423745ULL,
+    560266272480743ULL, 922517457707697ULL, 2066645939860874ULL, 1318277348414638ULL,
+    1576726809084003ULL, 1817337608563665ULL, 1874240939237666ULL, 754733726333910ULL,
+    97085310406474ULL, 751148364309235ULL, 1622159695715187ULL, 1444098819684916ULL,
+    130920805558089ULL, 1260449179085308ULL, 1860021740768461ULL, 110052860348509ULL,
+    193830891643810ULL, 164148413933881ULL, 180017794795332ULL, 1523506525254651ULL,
+    465981629225956ULL, 559733514964572ULL, 1279624874416974ULL, 2026642326892306ULL,
+    1425156829982409ULL, 2160936383793147ULL, 1061870624975247ULL, 2023497043036941ULL,
+    117942212883190ULL, 490339622800774ULL, 1729931303146295ULL, 422305932971074ULL,
+    529103152793096ULL, 1211973233775992ULL, 721364955929681ULL, 1497674430438813ULL,
+    342545521275073ULL, 2102107575279372ULL, 2108462244669966ULL, 1382582406064082ULL,
+    2206396818383323ULL, 2109093268641147ULL, 10809845110983ULL, 1605176920880099ULL,
+    744640650753946ULL, 1712758897518129ULL, 373410811281809ULL, 648838265800209ULL,
+    813058095530999ULL, 513987632620169ULL, 465516160703329ULL, 2136322186126330ULL,
+    1979645899422932ULL, 1197131006470786ULL, 1467836664863979ULL, 1340751381374628ULL,
+    1810066212667962ULL, 1009933588225499ULL, 1106129188080873ULL, 1388980405213901ULL,
+    533719246598044ULL, 1169435803073277ULL, 198920999285821ULL, 487492330629854ULL,
+    1807093008537778ULL, 1540899012923865ULL, 2075080271659867ULL, 1527990806921523ULL,
+    1323728742908002ULL, 1568595959608205ULL, 1388032187497212ULL, 2026968840050568ULL,
+    1396591153295755ULL, 820416950170901ULL, 520060313205582ULL, 2016404325094901ULL,
+    1584709677868520ULL, 272161374469956ULL, 1567188603996816ULL, 1986160530078221ULL,
+    553930264324589ULL, 1058426729027503ULL, 8762762886675ULL, 2216098143382988ULL,
+    1835145266889223ULL, 1712936431558441ULL, 1017009937844974ULL, 585361667812740ULL,
+    2114711541628181ULL, 2238729632971439ULL, 121257546253072ULL, 847154149018345ULL,
+    211972965476684ULL, 287499084460129ULL, 2098247259180197ULL, 839070411583329ULL,
+    339551619574372ULL, 1432951287640743ULL, 526481249498942ULL, 931991661905195ULL,
+    1884279965674487ULL, 200486405604411ULL, 364173020594788ULL, 518034455936955ULL,
+    1085564703965501ULL, 16030410467927ULL, 604865933167613ULL, 1695298441093964ULL,
+    498856548116159ULL, 2193030062787034ULL, 1706339802964179ULL, 1721199073493888ULL,
+    820740951039755ULL, 1216053436896834ULL, 23954895815139ULL, 1662515208920491ULL,
+    1705443427511899ULL, 1957928899570365ULL, 1189636258255725ULL, 1795695471103809ULL,
+    1691191297654118ULL, 282402585374360ULL, 460405330264832ULL, 63765529445733ULL,
+    469763447404473ULL, 733607089694996ULL, 685410420186959ULL, 1096682630419738ULL,
+    1162548510542362ULL, 1020949526456676ULL, 1211660396870573ULL, 613126398222696ULL,
+    1117829165843251ULL, 742432540886650ULL, 1483755088010658ULL, 942392007134474ULL,
+    1447834130944107ULL, 489368274863410ULL, 23192985544898ULL, 648442406146160ULL,
+    785438843373876ULL, 249464684645238ULL, 170494608205618ULL, 335112827260550ULL,
+    1462050123162735ULL, 1084803668439016ULL, 853459233600325ULL, 215777728187495ULL,
+    1965759433526974ULL, 1349482894446537ULL, 694163317612871ULL, 860536766165036ULL,
+    1178788094084321ULL, 1652739626626996ULL, 2115723946388185ULL, 1577204379094664ULL,
+    1083882859023240ULL, 1768759143381635ULL, 1737180992507258ULL, 246054513922239ULL,
+    577253134087234ULL, 356340280578042ULL, 1638917769925142ULL, 223550348130103ULL,
+    470592666638765ULL, 22663573966996ULL, 596552461152400ULL, 364143537069499ULL, 3942119457699ULL,
+    107951982889287ULL, 1843471406713209ULL, 1625773041610986ULL, 1466141092501702ULL,
+    1043024095021271ULL, 310429964047508ULL, 98559121500372ULL, 152746933782868ULL,
+    259407205078261ULL, 828123093322585ULL, 1576847274280091ULL, 1170871375757302ULL,
+    1588856194642775ULL, 984767822341977ULL, 1141497997993760ULL, 809325345150796ULL,
+    1879837728202511ULL, 201340910657893ULL, 1079157558888483ULL, 1052373448588065ULL,
+    1732036202501778ULL, 2105292670328445ULL, 679751387312402ULL, 1679682144926229ULL,
+    1695823455818780ULL, 498852317075849ULL, 1786555067788433ULL, 1670727545779425ULL,
+    117945875433544ULL, 407939139781844ULL, 854632120023778ULL, 1413383148360437ULL,
+    286030901733673ULL, 1207361858071196ULL, 461340408181417ULL, 1096919590360164ULL,
+    1837594897475685ULL, 533755561544165ULL, 1638688042247712ULL, 1431653684793005ULL,
+    1036458538873559ULL, 390822120341779ULL, 1920929837111618ULL, 543426740024168ULL,
+    645751357799929ULL, 2245025632994463ULL, 1550778638076452ULL, 223738153459949ULL,
+    1337209385492033ULL, 1276967236456531ULL, 1463815821063071ULL, 2070620870191473ULL,
+    1199170709413753ULL, 273230877394166ULL, 1873264887608046ULL, 890877152910775ULL,
+    983226445635730ULL, 44873798519521ULL, 697147127512130ULL, 961631038239304ULL,
+    709966160696826ULL, 1706677689540366ULL, 502782733796035ULL, 812545535346033ULL,
+    1693622521296452ULL, 1955813093002510ULL, 1259937612881362ULL, 1873032503803559ULL,
+    1140330566016428ULL, 1675726082440190ULL, 60029928909786ULL, 170335608866763ULL,
+    766444312315022ULL, 2025049511434113ULL, 2200845622430647ULL, 1201269851450408ULL,
+    590071752404907ULL, 1400995030286946ULL, 2152637413853822ULL, 2108495473841983ULL,
+    3855406710349ULL, 1726137673168580ULL, 51004317200100ULL, 1749082328586939ULL,
+    1704088976144558ULL, 1977318954775118ULL, 2062602253162400ULL, 948062503217479ULL,
+    361953965048030ULL, 1528264887238440ULL, 62582552172290ULL, 2241602163389280ULL,
+    156385388121765ULL, 2124100319761492ULL, 388928050571382ULL, 1556123596922727ULL,
+    979310669812384ULL, 113043855206104ULL, 2023223924825469ULL, 643651703263034ULL,
+    2234446903655540ULL, 1577241261424997ULL, 860253174523845ULL, 1691026473082448ULL,
+    1091672764933872ULL, 1957463109756365ULL, 530699502660193ULL, 349587141723569ULL,
+    674661681919563ULL, 1633727303856240ULL, 708909037922144ULL, 2160722508518119ULL,
+    1302188051602540ULL, 976114603845777ULL, 120004758721939ULL, 1681630708873780ULL,
+    622274095069244ULL, 1822346309016698ULL, 1100921177951904ULL, 2216952659181677ULL,
+    1844020550362490ULL, 1976451368365774ULL, 1321101422068822ULL, 1189859436282668ULL,
+    2008801879735257ULL, 2219413454333565ULL, 424288774231098ULL, 359793146977912ULL,
+    270293357948703ULL, 587226003677000ULL, 1482071926139945ULL, 1419630774650359ULL,
+    1104739070570175ULL, 1662129023224130ULL, 1609203612533411ULL, 1250932720691980ULL,
+    95215711818495ULL, 498746909028150ULL, 158151296991874ULL, 1201379988527734ULL,
+    561599945143989ULL, 2211577425617888ULL, 2166577612206324ULL, 1057590354233512ULL,
+    1968123280416769ULL, 1316586165401313ULL, 762728164447634ULL, 2045395244316047ULL,
+    1531796898725716ULL, 315385971670425ULL, 1109421039396756ULL, 2183635256408562ULL,
+    1896751252659461ULL, 840236037179080ULL, 796245792277211ULL, 508345890111193ULL,
+    1275386465287222ULL, 513560822858784ULL, 1784735733120313ULL, 1346467478899695ULL,
+    601125231208417ULL, 701076661112726ULL, 1841998436455089ULL, 1156768600940434ULL,
+    1967853462343221ULL, 2178318463061452ULL, 481885520752741ULL, 675262828640945ULL,
+    1033539418596582ULL, 1743329872635846ULL, 159322641251283ULL, 1573076470127113ULL,
+    954827619308195ULL, 778834750662635ULL, 619912782122617ULL, 515681498488209ULL,
+    1675866144246843ULL, 811716020969981ULL, 1125515272217398ULL, 1398917918287342ULL,
+    1301680949183175ULL, 726474739583734ULL, 587246193475200ULL, 1096581582611864ULL,
+    1469911826213486ULL, 1990099711206364ULL, 1256496099816508ULL, 2019924615195672ULL,
+    1251232456707555ULL, 2042971196009755ULL, 214061878479265ULL, 115385726395472ULL,
+    1677875239524132ULL, 756888883383540ULL, 1153862117756233ULL, 503391530851096ULL,
+    946070017477513ULL, 1878319040542579ULL, 1101349418586920ULL, 793245696431613ULL,
+    397920495357645ULL, 2174023872951112ULL, 1517867915189593ULL, 1829855041462995ULL,
+    1046709983503619ULL, 424081940711857ULL, 2112438073094647ULL, 1504338467349861ULL,
+    2244574127374532ULL, 2136937537441911ULL, 1741150838990304ULL, 25894628400571ULL,
+    512213526781178ULL, 1168384260796379ULL, 1424607682379833ULL, 938677789731564ULL,
+    872882241891896ULL, 1713199397007700ULL, 1410496326218359ULL, 854379752407031ULL,
+    465141611727634ULL, 315176937037857ULL, 1020115054571233ULL, 1856290111077229ULL,
+    2028366269898204ULL, 1432980880307543ULL, 469932710425448ULL, 581165267592247ULL,
+    496399148156603ULL, 2063435226705903ULL, 2116841086237705ULL, 498272567217048ULL,
+    1829438076967906ULL, 1573925801278491ULL, 460763576329867ULL, 1705264723728225ULL,
+    999514866082412ULL, 29635061779362ULL, 1884233592281020ULL, 1449755591461338ULL,
+    42579292783222ULL, 1869504355369200ULL, 495506004805251ULL, 264073104888427ULL,
+    2088880861028612ULL, 104646456386576ULL, 1258445191399967ULL, 1348736801545799ULL,
+    2068276361286613ULL, 884897216646374ULL, 922387476801376ULL, 1043886580402805ULL,
+    1240883498470831ULL, 1601554651937110ULL, 804382935289482ULL, 512379564477239ULL,
+    1466384519077032ULL, 1280698500238386ULL, 211303836685749ULL, 2081725624793803ULL,
+    545247644516879ULL, 215313359330384ULL, 286479751145614ULL, 2213650281751636ULL,
+    2164927945999874ULL, 2072162991540882ULL, 1443769115444779ULL, 1581473274363095ULL,
+    434633875922699ULL, 340456055781599ULL, 373043091080189ULL, 839476566531776ULL,
+    1856706858509978ULL, 931616224909153ULL, 1888181317414065ULL, 213654322650262ULL,
+    1161078103416244ULL, 1822042328851513ULL, 915817709028812ULL, 1828297056698188ULL,
+    1212017130909403ULL, 60258343247333ULL, 342085800008230ULL, 930240559508270ULL,
+    1549884999174952ULL, 809895264249462ULL, 184726257947682ULL, 1157065433504828ULL,
+    1209999630381477ULL, 999920399374391ULL, 1714770150788163ULL, 2026130985413228ULL,
+    506776632883140ULL, 1349042668246528ULL, 1937232292976967ULL, 942302637530730ULL,
+    160211904766226ULL, 1042724500438571ULL, 212454865139142ULL, 244104425172642ULL,
+    1376990622387496ULL, 76126752421227ULL, 1027540886376422ULL, 1912210655133026ULL,
+    13410411589575ULL, 1475856708587773ULL, 615563352691682ULL, 1446629324872644ULL,
+    1683670301784014ULL, 1049873327197127ULL, 1826401704084838ULL, 2032577048760775ULL,
+    1922203607878853ULL, 836708788764806ULL, 2193084654695012ULL, 1342923183256659ULL,
+    849356986294271ULL, 1228863973965618ULL, 94886161081867ULL, 1423288430204892ULL,
+    2016167528707016ULL, 1633187660972877ULL, 1550621242301752ULL, 340630244512994ULL,
+    2103577710806901ULL, 221625016538931ULL, 421544147350960ULL, 580428704555156ULL,
+    1479831381265617ULL, 518057926544698ULL, 955027348790630ULL, 1326749172561598ULL,
+    1118304625755967ULL, 1994005916095176ULL, 1799757332780663ULL, 751343129396941ULL,
+    1468672898746144ULL, 1451689964451386ULL, 755070293921171ULL, 904857405877052ULL,
+    1276087530766984ULL, 403986562858511ULL, 1530661255035337ULL, 1644972908910502ULL,
+    1370170080438957ULL, 139839536695744ULL, 909930462436512ULL, 1899999215356933ULL,
+    635992381064566ULL, 788740975837654ULL, 224241231493695ULL, 1267090030199302ULL,
+    998908061660139ULL, 1784537499699278ULL, 859195370018706ULL, 1953966091439379ULL,
+    2189271820076010ULL, 2039067059943978ULL, 1526694380855202ULL, 2040321513194941ULL,
+    329922071218689ULL, 1953032256401326ULL, 989631424403521ULL, 328825014934242ULL,
+    9407151397696ULL, 63551373671268ULL, 1624728632895792ULL, 1608324920739262ULL,
+    1178239350351945ULL, 1198077399579702ULL, 277620088676229ULL, 1775359437312528ULL,
+    1653558177737477ULL, 1652066043408850ULL, 1063359889686622ULL, 1975063804860653ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/msvc/internal/Hacl_Frodo_KEM.h b/include/msvc/internal/Hacl_Frodo_KEM.h
index 61574981..c36ec7f1 100644
--- a/include/msvc/internal/Hacl_Frodo_KEM.h
+++ b/include/msvc/internal/Hacl_Frodo_KEM.h
@@ -64,13 +64,13 @@ Hacl_Keccak_shake128_4x(
 static inline void
 Hacl_Impl_Matrix_mod_pow2(uint32_t n1, uint32_t n2, uint32_t logq, uint16_t *a)
 {
-  if (logq < (uint32_t)16U)
+  if (logq < 16U)
   {
-    for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+    for (uint32_t i0 = 0U; i0 < n1; i0++)
     {
-      for (uint32_t i = (uint32_t)0U; i < n2; i++)
+      for (uint32_t i = 0U; i < n2; i++)
       {
-        a[i0 * n2 + i] = a[i0 * n2 + i] & (((uint16_t)1U << logq) - (uint16_t)1U);
+        a[i0 * n2 + i] = (uint32_t)a[i0 * n2 + i] & ((1U << logq) - 1U);
       }
     }
     return;
@@ -80,11 +80,11 @@ Hacl_Impl_Matrix_mod_pow2(uint32_t n1, uint32_t n2, uint32_t logq, uint16_t *a)
 static inline void
 Hacl_Impl_Matrix_matrix_add(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i = (uint32_t)0U; i < n2; i++)
+    for (uint32_t i = 0U; i < n2; i++)
     {
-      a[i0 * n2 + i] = a[i0 * n2 + i] + b[i0 * n2 + i];
+      a[i0 * n2 + i] = (uint32_t)a[i0 * n2 + i] + (uint32_t)b[i0 * n2 + i];
     }
   }
 }
@@ -92,11 +92,11 @@ Hacl_Impl_Matrix_matrix_add(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 static inline void
 Hacl_Impl_Matrix_matrix_sub(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i = (uint32_t)0U; i < n2; i++)
+    for (uint32_t i = 0U; i < n2; i++)
     {
-      b[i0 * n2 + i] = a[i0 * n2 + i] - b[i0 * n2 + i];
+      b[i0 * n2 + i] = (uint32_t)a[i0 * n2 + i] - (uint32_t)b[i0 * n2 + i];
     }
   }
 }
@@ -111,17 +111,17 @@ Hacl_Impl_Matrix_matrix_mul(
   uint16_t *c
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n3; i1++)
+    for (uint32_t i1 = 0U; i1 < n3; i1++)
     {
-      uint16_t res = (uint16_t)0U;
-      for (uint32_t i = (uint32_t)0U; i < n2; i++)
+      uint16_t res = 0U;
+      for (uint32_t i = 0U; i < n2; i++)
       {
         uint16_t aij = a[i0 * n2 + i];
         uint16_t bjk = b[i * n3 + i1];
         uint16_t res0 = res;
-        res = res0 + aij * bjk;
+        res = (uint32_t)res0 + (uint32_t)aij * (uint32_t)bjk;
       }
       c[i0 * n3 + i1] = res;
     }
@@ -138,17 +138,17 @@ Hacl_Impl_Matrix_matrix_mul_s(
   uint16_t *c
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n3; i1++)
+    for (uint32_t i1 = 0U; i1 < n3; i1++)
     {
-      uint16_t res = (uint16_t)0U;
-      for (uint32_t i = (uint32_t)0U; i < n2; i++)
+      uint16_t res = 0U;
+      for (uint32_t i = 0U; i < n2; i++)
       {
         uint16_t aij = a[i0 * n2 + i];
         uint16_t bjk = b[i1 * n2 + i];
         uint16_t res0 = res;
-        res = res0 + aij * bjk;
+        res = (uint32_t)res0 + (uint32_t)aij * (uint32_t)bjk;
       }
       c[i0 * n3 + i1] = res;
     }
@@ -158,11 +158,11 @@ Hacl_Impl_Matrix_matrix_mul_s(
 static inline uint16_t
 Hacl_Impl_Matrix_matrix_eq(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 {
-  uint16_t res = (uint16_t)0xFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < n1 * n2; i++)
+  uint16_t res = 0xFFFFU;
+  for (uint32_t i = 0U; i < n1 * n2; i++)
   {
     uint16_t uu____0 = FStar_UInt16_eq_mask(a[i], b[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint16_t r = res;
   return r;
@@ -171,19 +171,19 @@ Hacl_Impl_Matrix_matrix_eq(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 static inline void
 Hacl_Impl_Matrix_matrix_to_lbytes(uint32_t n1, uint32_t n2, uint16_t *m, uint8_t *res)
 {
-  for (uint32_t i = (uint32_t)0U; i < n1 * n2; i++)
+  for (uint32_t i = 0U; i < n1 * n2; i++)
   {
-    store16_le(res + (uint32_t)2U * i, m[i]);
+    store16_le(res + 2U * i, m[i]);
   }
 }
 
 static inline void
 Hacl_Impl_Matrix_matrix_from_lbytes(uint32_t n1, uint32_t n2, uint8_t *b, uint16_t *res)
 {
-  for (uint32_t i = (uint32_t)0U; i < n1 * n2; i++)
+  for (uint32_t i = 0U; i < n1 * n2; i++)
   {
     uint16_t *os = res;
-    uint16_t u = load16_le(b + (uint32_t)2U * i);
+    uint16_t u = load16_le(b + 2U * i);
     uint16_t x = u;
     os[i] = x;
   }
@@ -192,53 +192,53 @@ Hacl_Impl_Matrix_matrix_from_lbytes(uint32_t n1, uint32_t n2, uint8_t *b, uint16
 static inline void
 Hacl_Impl_Frodo_Gen_frodo_gen_matrix_shake_4x(uint32_t n, uint8_t *seed, uint16_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint8_t), (uint32_t)8U * n);
-  uint8_t *r = (uint8_t *)alloca((uint32_t)8U * n * sizeof (uint8_t));
-  memset(r, 0U, (uint32_t)8U * n * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), 8U * n);
+  uint8_t *r = (uint8_t *)alloca(8U * n * sizeof (uint8_t));
+  memset(r, 0U, 8U * n * sizeof (uint8_t));
   uint8_t tmp_seed[72U] = { 0U };
-  memcpy(tmp_seed + (uint32_t)2U, seed, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(tmp_seed + (uint32_t)20U, seed, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(tmp_seed + (uint32_t)38U, seed, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(tmp_seed + (uint32_t)56U, seed, (uint32_t)16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 2U, seed, 16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 20U, seed, 16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 38U, seed, 16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 56U, seed, 16U * sizeof (uint8_t));
   memset(res, 0U, n * n * sizeof (uint16_t));
-  for (uint32_t i = (uint32_t)0U; i < n / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < n / 4U; i++)
   {
-    uint8_t *r0 = r + (uint32_t)0U * n;
-    uint8_t *r1 = r + (uint32_t)2U * n;
-    uint8_t *r2 = r + (uint32_t)4U * n;
-    uint8_t *r3 = r + (uint32_t)6U * n;
+    uint8_t *r0 = r + 0U * n;
+    uint8_t *r1 = r + 2U * n;
+    uint8_t *r2 = r + 4U * n;
+    uint8_t *r3 = r + 6U * n;
     uint8_t *tmp_seed0 = tmp_seed;
-    uint8_t *tmp_seed1 = tmp_seed + (uint32_t)18U;
-    uint8_t *tmp_seed2 = tmp_seed + (uint32_t)36U;
-    uint8_t *tmp_seed3 = tmp_seed + (uint32_t)54U;
-    store16_le(tmp_seed0, (uint16_t)((uint32_t)4U * i + (uint32_t)0U));
-    store16_le(tmp_seed1, (uint16_t)((uint32_t)4U * i + (uint32_t)1U));
-    store16_le(tmp_seed2, (uint16_t)((uint32_t)4U * i + (uint32_t)2U));
-    store16_le(tmp_seed3, (uint16_t)((uint32_t)4U * i + (uint32_t)3U));
-    Hacl_Keccak_shake128_4x((uint32_t)18U,
+    uint8_t *tmp_seed1 = tmp_seed + 18U;
+    uint8_t *tmp_seed2 = tmp_seed + 36U;
+    uint8_t *tmp_seed3 = tmp_seed + 54U;
+    store16_le(tmp_seed0, (uint16_t)(4U * i + 0U));
+    store16_le(tmp_seed1, (uint16_t)(4U * i + 1U));
+    store16_le(tmp_seed2, (uint16_t)(4U * i + 2U));
+    store16_le(tmp_seed3, (uint16_t)(4U * i + 3U));
+    Hacl_Keccak_shake128_4x(18U,
       tmp_seed0,
       tmp_seed1,
       tmp_seed2,
       tmp_seed3,
-      (uint32_t)2U * n,
+      2U * n,
       r0,
       r1,
       r2,
       r3);
-    for (uint32_t i0 = (uint32_t)0U; i0 < n; i0++)
+    for (uint32_t i0 = 0U; i0 < n; i0++)
     {
-      uint8_t *resij0 = r0 + i0 * (uint32_t)2U;
-      uint8_t *resij1 = r1 + i0 * (uint32_t)2U;
-      uint8_t *resij2 = r2 + i0 * (uint32_t)2U;
-      uint8_t *resij3 = r3 + i0 * (uint32_t)2U;
+      uint8_t *resij0 = r0 + i0 * 2U;
+      uint8_t *resij1 = r1 + i0 * 2U;
+      uint8_t *resij2 = r2 + i0 * 2U;
+      uint8_t *resij3 = r3 + i0 * 2U;
       uint16_t u = load16_le(resij0);
-      res[((uint32_t)4U * i + (uint32_t)0U) * n + i0] = u;
+      res[(4U * i + 0U) * n + i0] = u;
       uint16_t u0 = load16_le(resij1);
-      res[((uint32_t)4U * i + (uint32_t)1U) * n + i0] = u0;
+      res[(4U * i + 1U) * n + i0] = u0;
       uint16_t u1 = load16_le(resij2);
-      res[((uint32_t)4U * i + (uint32_t)2U) * n + i0] = u1;
+      res[(4U * i + 2U) * n + i0] = u1;
       uint16_t u2 = load16_le(resij3);
-      res[((uint32_t)4U * i + (uint32_t)3U) * n + i0] = u2;
+      res[(4U * i + 3U) * n + i0] = u2;
     }
   }
 }
@@ -270,27 +270,19 @@ static const
 uint16_t
 Hacl_Impl_Frodo_Params_cdf_table640[13U] =
   {
-    (uint16_t)4643U, (uint16_t)13363U, (uint16_t)20579U, (uint16_t)25843U, (uint16_t)29227U,
-    (uint16_t)31145U, (uint16_t)32103U, (uint16_t)32525U, (uint16_t)32689U, (uint16_t)32745U,
-    (uint16_t)32762U, (uint16_t)32766U, (uint16_t)32767U
+    4643U, 13363U, 20579U, 25843U, 29227U, 31145U, 32103U, 32525U, 32689U, 32745U, 32762U, 32766U,
+    32767U
   };
 
 static const
 uint16_t
 Hacl_Impl_Frodo_Params_cdf_table976[11U] =
-  {
-    (uint16_t)5638U, (uint16_t)15915U, (uint16_t)23689U, (uint16_t)28571U, (uint16_t)31116U,
-    (uint16_t)32217U, (uint16_t)32613U, (uint16_t)32731U, (uint16_t)32760U, (uint16_t)32766U,
-    (uint16_t)32767U
-  };
+  { 5638U, 15915U, 23689U, 28571U, 31116U, 32217U, 32613U, 32731U, 32760U, 32766U, 32767U };
 
 static const
 uint16_t
 Hacl_Impl_Frodo_Params_cdf_table1344[7U] =
-  {
-    (uint16_t)9142U, (uint16_t)23462U, (uint16_t)30338U, (uint16_t)32361U, (uint16_t)32725U,
-    (uint16_t)32765U, (uint16_t)32767U
-  };
+  { 9142U, 23462U, 30338U, 32361U, 32725U, 32765U, 32767U };
 
 static inline void
 Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(
@@ -301,26 +293,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)12U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 12U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table640[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -334,26 +326,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)12U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 12U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table640[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -367,26 +359,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)10U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 10U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table976[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -400,26 +392,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)6U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 6U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table1344[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -435,39 +427,34 @@ Hacl_Impl_Frodo_Pack_frodo_pack(
   uint8_t *res
 )
 {
-  uint32_t n = n1 * n2 / (uint32_t)8U;
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  uint32_t n = n1 * n2 / 8U;
+  for (uint32_t i = 0U; i < n; i++)
   {
-    uint16_t *a1 = a + (uint32_t)8U * i;
+    uint16_t *a1 = a + 8U * i;
     uint8_t *r = res + d * i;
-    uint16_t maskd = (uint16_t)((uint32_t)1U << d) - (uint16_t)1U;
+    uint16_t maskd = (uint32_t)(uint16_t)(1U << d) - 1U;
     uint8_t v16[16U] = { 0U };
-    uint16_t a0 = a1[0U] & maskd;
-    uint16_t a11 = a1[1U] & maskd;
-    uint16_t a2 = a1[2U] & maskd;
-    uint16_t a3 = a1[3U] & maskd;
-    uint16_t a4 = a1[4U] & maskd;
-    uint16_t a5 = a1[5U] & maskd;
-    uint16_t a6 = a1[6U] & maskd;
-    uint16_t a7 = a1[7U] & maskd;
+    uint16_t a0 = (uint32_t)a1[0U] & (uint32_t)maskd;
+    uint16_t a11 = (uint32_t)a1[1U] & (uint32_t)maskd;
+    uint16_t a2 = (uint32_t)a1[2U] & (uint32_t)maskd;
+    uint16_t a3 = (uint32_t)a1[3U] & (uint32_t)maskd;
+    uint16_t a4 = (uint32_t)a1[4U] & (uint32_t)maskd;
+    uint16_t a5 = (uint32_t)a1[5U] & (uint32_t)maskd;
+    uint16_t a6 = (uint32_t)a1[6U] & (uint32_t)maskd;
+    uint16_t a7 = (uint32_t)a1[7U] & (uint32_t)maskd;
     FStar_UInt128_uint128
     templong =
       FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a0),
-                      (uint32_t)7U * d),
-                    FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a11),
-                      (uint32_t)6U * d)),
-                  FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a2),
-                    (uint32_t)5U * d)),
-                FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a3),
-                  (uint32_t)4U * d)),
-              FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a4),
-                (uint32_t)3U * d)),
-            FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a5),
-              (uint32_t)2U * d)),
-          FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a6), (uint32_t)1U * d)),
-        FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a7), (uint32_t)0U * d));
+                      7U * d),
+                    FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a11), 6U * d)),
+                  FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a2), 5U * d)),
+                FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a3), 4U * d)),
+              FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a4), 3U * d)),
+            FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a5), 2U * d)),
+          FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a6), 1U * d)),
+        FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a7), 0U * d));
     store128_be(v16, templong);
-    uint8_t *src = v16 + (uint32_t)16U - d;
+    uint8_t *src = v16 + 16U - d;
     memcpy(r, src, d * sizeof (uint8_t));
   }
 }
@@ -481,48 +468,48 @@ Hacl_Impl_Frodo_Pack_frodo_unpack(
   uint16_t *res
 )
 {
-  uint32_t n = n1 * n2 / (uint32_t)8U;
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  uint32_t n = n1 * n2 / 8U;
+  for (uint32_t i = 0U; i < n; i++)
   {
     uint8_t *b1 = b + d * i;
-    uint16_t *r = res + (uint32_t)8U * i;
-    uint16_t maskd = (uint16_t)((uint32_t)1U << d) - (uint16_t)1U;
+    uint16_t *r = res + 8U * i;
+    uint16_t maskd = (uint32_t)(uint16_t)(1U << d) - 1U;
     uint8_t src[16U] = { 0U };
-    memcpy(src + (uint32_t)16U - d, b1, d * sizeof (uint8_t));
+    memcpy(src + 16U - d, b1, d * sizeof (uint8_t));
     FStar_UInt128_uint128 u = load128_be(src);
     FStar_UInt128_uint128 templong = u;
     r[0U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)7U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          7U * d))
+      & (uint32_t)maskd;
     r[1U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)6U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          6U * d))
+      & (uint32_t)maskd;
     r[2U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)5U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          5U * d))
+      & (uint32_t)maskd;
     r[3U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)4U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          4U * d))
+      & (uint32_t)maskd;
     r[4U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)3U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          3U * d))
+      & (uint32_t)maskd;
     r[5U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)2U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          2U * d))
+      & (uint32_t)maskd;
     r[6U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)1U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          1U * d))
+      & (uint32_t)maskd;
     r[7U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)0U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          0U * d))
+      & (uint32_t)maskd;
   }
 }
 
@@ -535,7 +522,7 @@ Hacl_Impl_Frodo_Encode_frodo_key_encode(
   uint16_t *res
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n; i0++)
+  for (uint32_t i0 = 0U; i0 < n; i0++)
   {
     uint8_t v8[8U] = { 0U };
     uint8_t *chunk = a + i0 * b;
@@ -544,11 +531,11 @@ Hacl_Impl_Frodo_Encode_frodo_key_encode(
     uint64_t x = u;
     uint64_t x0 = x;
     KRML_MAYBE_FOR8(i,
-      (uint32_t)0U,
-      (uint32_t)8U,
-      (uint32_t)1U,
-      uint64_t rk = x0 >> b * i & (((uint64_t)1U << b) - (uint64_t)1U);
-      res[i0 * n + i] = (uint16_t)rk << (logq - b););
+      0U,
+      8U,
+      1U,
+      uint64_t rk = x0 >> b * i & ((1ULL << b) - 1ULL);
+      res[i0 * n + i] = (uint32_t)(uint16_t)rk << (logq - b););
   }
 }
 
@@ -561,16 +548,16 @@ Hacl_Impl_Frodo_Encode_frodo_key_decode(
   uint8_t *res
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n; i0++)
+  for (uint32_t i0 = 0U; i0 < n; i0++)
   {
-    uint64_t templong = (uint64_t)0U;
+    uint64_t templong = 0ULL;
     KRML_MAYBE_FOR8(i,
-      (uint32_t)0U,
-      (uint32_t)8U,
-      (uint32_t)1U,
+      0U,
+      8U,
+      1U,
       uint16_t aik = a[i0 * n + i];
-      uint16_t res1 = (aik + ((uint16_t)1U << (logq - b - (uint32_t)1U))) >> (logq - b);
-      templong = templong | (uint64_t)(res1 & (((uint16_t)1U << b) - (uint16_t)1U)) << b * i;);
+      uint16_t res1 = (((uint32_t)aik + (1U << (logq - b - 1U))) & 0xFFFFU) >> (logq - b);
+      templong = templong | (uint64_t)((uint32_t)res1 & ((1U << b) - 1U)) << b * i;);
     uint64_t templong0 = templong;
     uint8_t v8[8U] = { 0U };
     store64_le(v8, templong0);
diff --git a/include/msvc/internal/Hacl_Hash_SHA2.h b/include/msvc/internal/Hacl_Hash_SHA2.h
index bbffdc50..8c912fb8 100644
--- a/include/msvc/internal/Hacl_Hash_SHA2.h
+++ b/include/msvc/internal/Hacl_Hash_SHA2.h
@@ -42,89 +42,74 @@ static const
 uint32_t
 Hacl_Impl_SHA2_Generic_h224[8U] =
   {
-    (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, (uint32_t)0xf70e5939U,
-    (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U
+    0xc1059ed8U, 0x367cd507U, 0x3070dd17U, 0xf70e5939U, 0xffc00b31U, 0x68581511U, 0x64f98fa7U,
+    0xbefa4fa4U
   };
 
 static const
 uint32_t
 Hacl_Impl_SHA2_Generic_h256[8U] =
   {
-    (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU,
-    (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U
+    0x6a09e667U, 0xbb67ae85U, 0x3c6ef372U, 0xa54ff53aU, 0x510e527fU, 0x9b05688cU, 0x1f83d9abU,
+    0x5be0cd19U
   };
 
 static const
 uint64_t
 Hacl_Impl_SHA2_Generic_h384[8U] =
   {
-    (uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, (uint64_t)0x9159015a3070dd17U,
-    (uint64_t)0x152fecd8f70e5939U, (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U,
-    (uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U
+    0xcbbb9d5dc1059ed8ULL, 0x629a292a367cd507ULL, 0x9159015a3070dd17ULL, 0x152fecd8f70e5939ULL,
+    0x67332667ffc00b31ULL, 0x8eb44a8768581511ULL, 0xdb0c2e0d64f98fa7ULL, 0x47b5481dbefa4fa4ULL
   };
 
 static const
 uint64_t
 Hacl_Impl_SHA2_Generic_h512[8U] =
   {
-    (uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, (uint64_t)0x3c6ef372fe94f82bU,
-    (uint64_t)0xa54ff53a5f1d36f1U, (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU,
-    (uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U
+    0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
+    0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL
   };
 
 static const
 uint32_t
 Hacl_Impl_SHA2_Generic_k224_256[64U] =
   {
-    (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U,
-    (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U,
-    (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U,
-    (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U,
-    (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU,
-    (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU,
-    (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U,
-    (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U,
-    (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U,
-    (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U,
-    (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U,
-    (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U,
-    (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U,
-    (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U,
-    (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U,
-    (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U
+    0x428a2f98U, 0x71374491U, 0xb5c0fbcfU, 0xe9b5dba5U, 0x3956c25bU, 0x59f111f1U, 0x923f82a4U,
+    0xab1c5ed5U, 0xd807aa98U, 0x12835b01U, 0x243185beU, 0x550c7dc3U, 0x72be5d74U, 0x80deb1feU,
+    0x9bdc06a7U, 0xc19bf174U, 0xe49b69c1U, 0xefbe4786U, 0x0fc19dc6U, 0x240ca1ccU, 0x2de92c6fU,
+    0x4a7484aaU, 0x5cb0a9dcU, 0x76f988daU, 0x983e5152U, 0xa831c66dU, 0xb00327c8U, 0xbf597fc7U,
+    0xc6e00bf3U, 0xd5a79147U, 0x06ca6351U, 0x14292967U, 0x27b70a85U, 0x2e1b2138U, 0x4d2c6dfcU,
+    0x53380d13U, 0x650a7354U, 0x766a0abbU, 0x81c2c92eU, 0x92722c85U, 0xa2bfe8a1U, 0xa81a664bU,
+    0xc24b8b70U, 0xc76c51a3U, 0xd192e819U, 0xd6990624U, 0xf40e3585U, 0x106aa070U, 0x19a4c116U,
+    0x1e376c08U, 0x2748774cU, 0x34b0bcb5U, 0x391c0cb3U, 0x4ed8aa4aU, 0x5b9cca4fU, 0x682e6ff3U,
+    0x748f82eeU, 0x78a5636fU, 0x84c87814U, 0x8cc70208U, 0x90befffaU, 0xa4506cebU, 0xbef9a3f7U,
+    0xc67178f2U
   };
 
 static const
 uint64_t
 Hacl_Impl_SHA2_Generic_k384_512[80U] =
   {
-    (uint64_t)0x428a2f98d728ae22U, (uint64_t)0x7137449123ef65cdU, (uint64_t)0xb5c0fbcfec4d3b2fU,
-    (uint64_t)0xe9b5dba58189dbbcU, (uint64_t)0x3956c25bf348b538U, (uint64_t)0x59f111f1b605d019U,
-    (uint64_t)0x923f82a4af194f9bU, (uint64_t)0xab1c5ed5da6d8118U, (uint64_t)0xd807aa98a3030242U,
-    (uint64_t)0x12835b0145706fbeU, (uint64_t)0x243185be4ee4b28cU, (uint64_t)0x550c7dc3d5ffb4e2U,
-    (uint64_t)0x72be5d74f27b896fU, (uint64_t)0x80deb1fe3b1696b1U, (uint64_t)0x9bdc06a725c71235U,
-    (uint64_t)0xc19bf174cf692694U, (uint64_t)0xe49b69c19ef14ad2U, (uint64_t)0xefbe4786384f25e3U,
-    (uint64_t)0x0fc19dc68b8cd5b5U, (uint64_t)0x240ca1cc77ac9c65U, (uint64_t)0x2de92c6f592b0275U,
-    (uint64_t)0x4a7484aa6ea6e483U, (uint64_t)0x5cb0a9dcbd41fbd4U, (uint64_t)0x76f988da831153b5U,
-    (uint64_t)0x983e5152ee66dfabU, (uint64_t)0xa831c66d2db43210U, (uint64_t)0xb00327c898fb213fU,
-    (uint64_t)0xbf597fc7beef0ee4U, (uint64_t)0xc6e00bf33da88fc2U, (uint64_t)0xd5a79147930aa725U,
-    (uint64_t)0x06ca6351e003826fU, (uint64_t)0x142929670a0e6e70U, (uint64_t)0x27b70a8546d22ffcU,
-    (uint64_t)0x2e1b21385c26c926U, (uint64_t)0x4d2c6dfc5ac42aedU, (uint64_t)0x53380d139d95b3dfU,
-    (uint64_t)0x650a73548baf63deU, (uint64_t)0x766a0abb3c77b2a8U, (uint64_t)0x81c2c92e47edaee6U,
-    (uint64_t)0x92722c851482353bU, (uint64_t)0xa2bfe8a14cf10364U, (uint64_t)0xa81a664bbc423001U,
-    (uint64_t)0xc24b8b70d0f89791U, (uint64_t)0xc76c51a30654be30U, (uint64_t)0xd192e819d6ef5218U,
-    (uint64_t)0xd69906245565a910U, (uint64_t)0xf40e35855771202aU, (uint64_t)0x106aa07032bbd1b8U,
-    (uint64_t)0x19a4c116b8d2d0c8U, (uint64_t)0x1e376c085141ab53U, (uint64_t)0x2748774cdf8eeb99U,
-    (uint64_t)0x34b0bcb5e19b48a8U, (uint64_t)0x391c0cb3c5c95a63U, (uint64_t)0x4ed8aa4ae3418acbU,
-    (uint64_t)0x5b9cca4f7763e373U, (uint64_t)0x682e6ff3d6b2b8a3U, (uint64_t)0x748f82ee5defb2fcU,
-    (uint64_t)0x78a5636f43172f60U, (uint64_t)0x84c87814a1f0ab72U, (uint64_t)0x8cc702081a6439ecU,
-    (uint64_t)0x90befffa23631e28U, (uint64_t)0xa4506cebde82bde9U, (uint64_t)0xbef9a3f7b2c67915U,
-    (uint64_t)0xc67178f2e372532bU, (uint64_t)0xca273eceea26619cU, (uint64_t)0xd186b8c721c0c207U,
-    (uint64_t)0xeada7dd6cde0eb1eU, (uint64_t)0xf57d4f7fee6ed178U, (uint64_t)0x06f067aa72176fbaU,
-    (uint64_t)0x0a637dc5a2c898a6U, (uint64_t)0x113f9804bef90daeU, (uint64_t)0x1b710b35131c471bU,
-    (uint64_t)0x28db77f523047d84U, (uint64_t)0x32caab7b40c72493U, (uint64_t)0x3c9ebe0a15c9bebcU,
-    (uint64_t)0x431d67c49c100d4cU, (uint64_t)0x4cc5d4becb3e42b6U, (uint64_t)0x597f299cfc657e2aU,
-    (uint64_t)0x5fcb6fab3ad6faecU, (uint64_t)0x6c44198c4a475817U
+    0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
+    0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
+    0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
+    0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
+    0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
+    0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
+    0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
+    0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
+    0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
+    0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
+    0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
+    0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
+    0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
+    0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
+    0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
+    0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
+    0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
+    0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
+    0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
+    0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
   };
 
 void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash);
diff --git a/include/msvc/internal/Hacl_Impl_Blake2_Constants.h b/include/msvc/internal/Hacl_Impl_Blake2_Constants.h
index 185317ba..a5b6d4ce 100644
--- a/include/msvc/internal/Hacl_Impl_Blake2_Constants.h
+++ b/include/msvc/internal/Hacl_Impl_Blake2_Constants.h
@@ -39,50 +39,30 @@ static const
 uint32_t
 Hacl_Impl_Blake2_Constants_sigmaTable[160U] =
   {
-    (uint32_t)0U, (uint32_t)1U, (uint32_t)2U, (uint32_t)3U, (uint32_t)4U, (uint32_t)5U,
-    (uint32_t)6U, (uint32_t)7U, (uint32_t)8U, (uint32_t)9U, (uint32_t)10U, (uint32_t)11U,
-    (uint32_t)12U, (uint32_t)13U, (uint32_t)14U, (uint32_t)15U, (uint32_t)14U, (uint32_t)10U,
-    (uint32_t)4U, (uint32_t)8U, (uint32_t)9U, (uint32_t)15U, (uint32_t)13U, (uint32_t)6U,
-    (uint32_t)1U, (uint32_t)12U, (uint32_t)0U, (uint32_t)2U, (uint32_t)11U, (uint32_t)7U,
-    (uint32_t)5U, (uint32_t)3U, (uint32_t)11U, (uint32_t)8U, (uint32_t)12U, (uint32_t)0U,
-    (uint32_t)5U, (uint32_t)2U, (uint32_t)15U, (uint32_t)13U, (uint32_t)10U, (uint32_t)14U,
-    (uint32_t)3U, (uint32_t)6U, (uint32_t)7U, (uint32_t)1U, (uint32_t)9U, (uint32_t)4U,
-    (uint32_t)7U, (uint32_t)9U, (uint32_t)3U, (uint32_t)1U, (uint32_t)13U, (uint32_t)12U,
-    (uint32_t)11U, (uint32_t)14U, (uint32_t)2U, (uint32_t)6U, (uint32_t)5U, (uint32_t)10U,
-    (uint32_t)4U, (uint32_t)0U, (uint32_t)15U, (uint32_t)8U, (uint32_t)9U, (uint32_t)0U,
-    (uint32_t)5U, (uint32_t)7U, (uint32_t)2U, (uint32_t)4U, (uint32_t)10U, (uint32_t)15U,
-    (uint32_t)14U, (uint32_t)1U, (uint32_t)11U, (uint32_t)12U, (uint32_t)6U, (uint32_t)8U,
-    (uint32_t)3U, (uint32_t)13U, (uint32_t)2U, (uint32_t)12U, (uint32_t)6U, (uint32_t)10U,
-    (uint32_t)0U, (uint32_t)11U, (uint32_t)8U, (uint32_t)3U, (uint32_t)4U, (uint32_t)13U,
-    (uint32_t)7U, (uint32_t)5U, (uint32_t)15U, (uint32_t)14U, (uint32_t)1U, (uint32_t)9U,
-    (uint32_t)12U, (uint32_t)5U, (uint32_t)1U, (uint32_t)15U, (uint32_t)14U, (uint32_t)13U,
-    (uint32_t)4U, (uint32_t)10U, (uint32_t)0U, (uint32_t)7U, (uint32_t)6U, (uint32_t)3U,
-    (uint32_t)9U, (uint32_t)2U, (uint32_t)8U, (uint32_t)11U, (uint32_t)13U, (uint32_t)11U,
-    (uint32_t)7U, (uint32_t)14U, (uint32_t)12U, (uint32_t)1U, (uint32_t)3U, (uint32_t)9U,
-    (uint32_t)5U, (uint32_t)0U, (uint32_t)15U, (uint32_t)4U, (uint32_t)8U, (uint32_t)6U,
-    (uint32_t)2U, (uint32_t)10U, (uint32_t)6U, (uint32_t)15U, (uint32_t)14U, (uint32_t)9U,
-    (uint32_t)11U, (uint32_t)3U, (uint32_t)0U, (uint32_t)8U, (uint32_t)12U, (uint32_t)2U,
-    (uint32_t)13U, (uint32_t)7U, (uint32_t)1U, (uint32_t)4U, (uint32_t)10U, (uint32_t)5U,
-    (uint32_t)10U, (uint32_t)2U, (uint32_t)8U, (uint32_t)4U, (uint32_t)7U, (uint32_t)6U,
-    (uint32_t)1U, (uint32_t)5U, (uint32_t)15U, (uint32_t)11U, (uint32_t)9U, (uint32_t)14U,
-    (uint32_t)3U, (uint32_t)12U, (uint32_t)13U
+    0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U, 8U, 9U, 10U, 11U, 12U, 13U, 14U, 15U, 14U, 10U, 4U, 8U, 9U, 15U,
+    13U, 6U, 1U, 12U, 0U, 2U, 11U, 7U, 5U, 3U, 11U, 8U, 12U, 0U, 5U, 2U, 15U, 13U, 10U, 14U, 3U, 6U,
+    7U, 1U, 9U, 4U, 7U, 9U, 3U, 1U, 13U, 12U, 11U, 14U, 2U, 6U, 5U, 10U, 4U, 0U, 15U, 8U, 9U, 0U,
+    5U, 7U, 2U, 4U, 10U, 15U, 14U, 1U, 11U, 12U, 6U, 8U, 3U, 13U, 2U, 12U, 6U, 10U, 0U, 11U, 8U, 3U,
+    4U, 13U, 7U, 5U, 15U, 14U, 1U, 9U, 12U, 5U, 1U, 15U, 14U, 13U, 4U, 10U, 0U, 7U, 6U, 3U, 9U, 2U,
+    8U, 11U, 13U, 11U, 7U, 14U, 12U, 1U, 3U, 9U, 5U, 0U, 15U, 4U, 8U, 6U, 2U, 10U, 6U, 15U, 14U, 9U,
+    11U, 3U, 0U, 8U, 12U, 2U, 13U, 7U, 1U, 4U, 10U, 5U, 10U, 2U, 8U, 4U, 7U, 6U, 1U, 5U, 15U, 11U,
+    9U, 14U, 3U, 12U, 13U
   };
 
 static const
 uint32_t
 Hacl_Impl_Blake2_Constants_ivTable_S[8U] =
   {
-    (uint32_t)0x6A09E667U, (uint32_t)0xBB67AE85U, (uint32_t)0x3C6EF372U, (uint32_t)0xA54FF53AU,
-    (uint32_t)0x510E527FU, (uint32_t)0x9B05688CU, (uint32_t)0x1F83D9ABU, (uint32_t)0x5BE0CD19U
+    0x6A09E667U, 0xBB67AE85U, 0x3C6EF372U, 0xA54FF53AU, 0x510E527FU, 0x9B05688CU, 0x1F83D9ABU,
+    0x5BE0CD19U
   };
 
 static const
 uint64_t
 Hacl_Impl_Blake2_Constants_ivTable_B[8U] =
   {
-    (uint64_t)0x6A09E667F3BCC908U, (uint64_t)0xBB67AE8584CAA73BU, (uint64_t)0x3C6EF372FE94F82BU,
-    (uint64_t)0xA54FF53A5F1D36F1U, (uint64_t)0x510E527FADE682D1U, (uint64_t)0x9B05688C2B3E6C1FU,
-    (uint64_t)0x1F83D9ABFB41BD6BU, (uint64_t)0x5BE0CD19137E2179U
+    0x6A09E667F3BCC908ULL, 0xBB67AE8584CAA73BULL, 0x3C6EF372FE94F82BULL, 0xA54FF53A5F1D36F1ULL,
+    0x510E527FADE682D1ULL, 0x9B05688C2B3E6C1FULL, 0x1F83D9ABFB41BD6BULL, 0x5BE0CD19137E2179ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/msvc/internal/Hacl_Impl_FFDHE_Constants.h b/include/msvc/internal/Hacl_Impl_FFDHE_Constants.h
index c746c411..80cbdd52 100644
--- a/include/msvc/internal/Hacl_Impl_FFDHE_Constants.h
+++ b/include/msvc/internal/Hacl_Impl_FFDHE_Constants.h
@@ -35,528 +35,265 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-static const uint8_t Hacl_Impl_FFDHE_Constants_ffdhe_g2[1U] = { (uint8_t)0x02U };
+static const uint8_t Hacl_Impl_FFDHE_Constants_ffdhe_g2[1U] = { 0x02U };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p2048[256U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x28U,
-    (uint8_t)0x5CU, (uint8_t)0x97U, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x28U, 0x5CU,
+    0x97U, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p3072[384U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0xC6U, (uint8_t)0x2EU, (uint8_t)0x37U, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0xC6U, 0x2EU, 0x37U, 0xFFU,
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p4096[512U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU,
-    (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU,
-    (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U,
-    (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U,
-    (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U,
-    (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U,
-    (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U,
-    (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U,
-    (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU,
-    (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U,
-    (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U,
-    (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U,
-    (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U,
-    (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU,
-    (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U,
-    (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U,
-    (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U,
-    (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU,
-    (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U,
-    (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U,
-    (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU,
-    (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x65U, (uint8_t)0x5FU, (uint8_t)0x6AU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0x9EU, 0x1EU, 0xF1U, 0x6EU,
+    0x6FU, 0x52U, 0xC3U, 0x16U, 0x4DU, 0xF4U, 0xFBU, 0x79U, 0x30U, 0xE9U, 0xE4U, 0xE5U, 0x88U,
+    0x57U, 0xB6U, 0xACU, 0x7DU, 0x5FU, 0x42U, 0xD6U, 0x9FU, 0x6DU, 0x18U, 0x77U, 0x63U, 0xCFU,
+    0x1DU, 0x55U, 0x03U, 0x40U, 0x04U, 0x87U, 0xF5U, 0x5BU, 0xA5U, 0x7EU, 0x31U, 0xCCU, 0x7AU,
+    0x71U, 0x35U, 0xC8U, 0x86U, 0xEFU, 0xB4U, 0x31U, 0x8AU, 0xEDU, 0x6AU, 0x1EU, 0x01U, 0x2DU,
+    0x9EU, 0x68U, 0x32U, 0xA9U, 0x07U, 0x60U, 0x0AU, 0x91U, 0x81U, 0x30U, 0xC4U, 0x6DU, 0xC7U,
+    0x78U, 0xF9U, 0x71U, 0xADU, 0x00U, 0x38U, 0x09U, 0x29U, 0x99U, 0xA3U, 0x33U, 0xCBU, 0x8BU,
+    0x7AU, 0x1AU, 0x1DU, 0xB9U, 0x3DU, 0x71U, 0x40U, 0x00U, 0x3CU, 0x2AU, 0x4EU, 0xCEU, 0xA9U,
+    0xF9U, 0x8DU, 0x0AU, 0xCCU, 0x0AU, 0x82U, 0x91U, 0xCDU, 0xCEU, 0xC9U, 0x7DU, 0xCFU, 0x8EU,
+    0xC9U, 0xB5U, 0x5AU, 0x7FU, 0x88U, 0xA4U, 0x6BU, 0x4DU, 0xB5U, 0xA8U, 0x51U, 0xF4U, 0x41U,
+    0x82U, 0xE1U, 0xC6U, 0x8AU, 0x00U, 0x7EU, 0x5EU, 0x65U, 0x5FU, 0x6AU, 0xFFU, 0xFFU, 0xFFU,
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p6144[768U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU,
-    (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU,
-    (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U,
-    (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U,
-    (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U,
-    (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U,
-    (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U,
-    (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U,
-    (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU,
-    (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U,
-    (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U,
-    (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U,
-    (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U,
-    (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU,
-    (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U,
-    (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U,
-    (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U,
-    (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU,
-    (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U,
-    (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U,
-    (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU,
-    (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x0DU, (uint8_t)0xD9U, (uint8_t)0x02U,
-    (uint8_t)0x0BU, (uint8_t)0xFDU, (uint8_t)0x64U, (uint8_t)0xB6U, (uint8_t)0x45U, (uint8_t)0x03U,
-    (uint8_t)0x6CU, (uint8_t)0x7AU, (uint8_t)0x4EU, (uint8_t)0x67U, (uint8_t)0x7DU, (uint8_t)0x2CU,
-    (uint8_t)0x38U, (uint8_t)0x53U, (uint8_t)0x2AU, (uint8_t)0x3AU, (uint8_t)0x23U, (uint8_t)0xBAU,
-    (uint8_t)0x44U, (uint8_t)0x42U, (uint8_t)0xCAU, (uint8_t)0xF5U, (uint8_t)0x3EU, (uint8_t)0xA6U,
-    (uint8_t)0x3BU, (uint8_t)0xB4U, (uint8_t)0x54U, (uint8_t)0x32U, (uint8_t)0x9BU, (uint8_t)0x76U,
-    (uint8_t)0x24U, (uint8_t)0xC8U, (uint8_t)0x91U, (uint8_t)0x7BU, (uint8_t)0xDDU, (uint8_t)0x64U,
-    (uint8_t)0xB1U, (uint8_t)0xC0U, (uint8_t)0xFDU, (uint8_t)0x4CU, (uint8_t)0xB3U, (uint8_t)0x8EU,
-    (uint8_t)0x8CU, (uint8_t)0x33U, (uint8_t)0x4CU, (uint8_t)0x70U, (uint8_t)0x1CU, (uint8_t)0x3AU,
-    (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x06U, (uint8_t)0x57U, (uint8_t)0xFCU, (uint8_t)0xCFU,
-    (uint8_t)0xECU, (uint8_t)0x71U, (uint8_t)0x9BU, (uint8_t)0x1FU, (uint8_t)0x5CU, (uint8_t)0x3EU,
-    (uint8_t)0x4EU, (uint8_t)0x46U, (uint8_t)0x04U, (uint8_t)0x1FU, (uint8_t)0x38U, (uint8_t)0x81U,
-    (uint8_t)0x47U, (uint8_t)0xFBU, (uint8_t)0x4CU, (uint8_t)0xFDU, (uint8_t)0xB4U, (uint8_t)0x77U,
-    (uint8_t)0xA5U, (uint8_t)0x24U, (uint8_t)0x71U, (uint8_t)0xF7U, (uint8_t)0xA9U, (uint8_t)0xA9U,
-    (uint8_t)0x69U, (uint8_t)0x10U, (uint8_t)0xB8U, (uint8_t)0x55U, (uint8_t)0x32U, (uint8_t)0x2EU,
-    (uint8_t)0xDBU, (uint8_t)0x63U, (uint8_t)0x40U, (uint8_t)0xD8U, (uint8_t)0xA0U, (uint8_t)0x0EU,
-    (uint8_t)0xF0U, (uint8_t)0x92U, (uint8_t)0x35U, (uint8_t)0x05U, (uint8_t)0x11U, (uint8_t)0xE3U,
-    (uint8_t)0x0AU, (uint8_t)0xBEU, (uint8_t)0xC1U, (uint8_t)0xFFU, (uint8_t)0xF9U, (uint8_t)0xE3U,
-    (uint8_t)0xA2U, (uint8_t)0x6EU, (uint8_t)0x7FU, (uint8_t)0xB2U, (uint8_t)0x9FU, (uint8_t)0x8CU,
-    (uint8_t)0x18U, (uint8_t)0x30U, (uint8_t)0x23U, (uint8_t)0xC3U, (uint8_t)0x58U, (uint8_t)0x7EU,
-    (uint8_t)0x38U, (uint8_t)0xDAU, (uint8_t)0x00U, (uint8_t)0x77U, (uint8_t)0xD9U, (uint8_t)0xB4U,
-    (uint8_t)0x76U, (uint8_t)0x3EU, (uint8_t)0x4EU, (uint8_t)0x4BU, (uint8_t)0x94U, (uint8_t)0xB2U,
-    (uint8_t)0xBBU, (uint8_t)0xC1U, (uint8_t)0x94U, (uint8_t)0xC6U, (uint8_t)0x65U, (uint8_t)0x1EU,
-    (uint8_t)0x77U, (uint8_t)0xCAU, (uint8_t)0xF9U, (uint8_t)0x92U, (uint8_t)0xEEU, (uint8_t)0xAAU,
-    (uint8_t)0xC0U, (uint8_t)0x23U, (uint8_t)0x2AU, (uint8_t)0x28U, (uint8_t)0x1BU, (uint8_t)0xF6U,
-    (uint8_t)0xB3U, (uint8_t)0xA7U, (uint8_t)0x39U, (uint8_t)0xC1U, (uint8_t)0x22U, (uint8_t)0x61U,
-    (uint8_t)0x16U, (uint8_t)0x82U, (uint8_t)0x0AU, (uint8_t)0xE8U, (uint8_t)0xDBU, (uint8_t)0x58U,
-    (uint8_t)0x47U, (uint8_t)0xA6U, (uint8_t)0x7CU, (uint8_t)0xBEU, (uint8_t)0xF9U, (uint8_t)0xC9U,
-    (uint8_t)0x09U, (uint8_t)0x1BU, (uint8_t)0x46U, (uint8_t)0x2DU, (uint8_t)0x53U, (uint8_t)0x8CU,
-    (uint8_t)0xD7U, (uint8_t)0x2BU, (uint8_t)0x03U, (uint8_t)0x74U, (uint8_t)0x6AU, (uint8_t)0xE7U,
-    (uint8_t)0x7FU, (uint8_t)0x5EU, (uint8_t)0x62U, (uint8_t)0x29U, (uint8_t)0x2CU, (uint8_t)0x31U,
-    (uint8_t)0x15U, (uint8_t)0x62U, (uint8_t)0xA8U, (uint8_t)0x46U, (uint8_t)0x50U, (uint8_t)0x5DU,
-    (uint8_t)0xC8U, (uint8_t)0x2DU, (uint8_t)0xB8U, (uint8_t)0x54U, (uint8_t)0x33U, (uint8_t)0x8AU,
-    (uint8_t)0xE4U, (uint8_t)0x9FU, (uint8_t)0x52U, (uint8_t)0x35U, (uint8_t)0xC9U, (uint8_t)0x5BU,
-    (uint8_t)0x91U, (uint8_t)0x17U, (uint8_t)0x8CU, (uint8_t)0xCFU, (uint8_t)0x2DU, (uint8_t)0xD5U,
-    (uint8_t)0xCAU, (uint8_t)0xCEU, (uint8_t)0xF4U, (uint8_t)0x03U, (uint8_t)0xECU, (uint8_t)0x9DU,
-    (uint8_t)0x18U, (uint8_t)0x10U, (uint8_t)0xC6U, (uint8_t)0x27U, (uint8_t)0x2BU, (uint8_t)0x04U,
-    (uint8_t)0x5BU, (uint8_t)0x3BU, (uint8_t)0x71U, (uint8_t)0xF9U, (uint8_t)0xDCU, (uint8_t)0x6BU,
-    (uint8_t)0x80U, (uint8_t)0xD6U, (uint8_t)0x3FU, (uint8_t)0xDDU, (uint8_t)0x4AU, (uint8_t)0x8EU,
-    (uint8_t)0x9AU, (uint8_t)0xDBU, (uint8_t)0x1EU, (uint8_t)0x69U, (uint8_t)0x62U, (uint8_t)0xA6U,
-    (uint8_t)0x95U, (uint8_t)0x26U, (uint8_t)0xD4U, (uint8_t)0x31U, (uint8_t)0x61U, (uint8_t)0xC1U,
-    (uint8_t)0xA4U, (uint8_t)0x1DU, (uint8_t)0x57U, (uint8_t)0x0DU, (uint8_t)0x79U, (uint8_t)0x38U,
-    (uint8_t)0xDAU, (uint8_t)0xD4U, (uint8_t)0xA4U, (uint8_t)0x0EU, (uint8_t)0x32U, (uint8_t)0x9CU,
-    (uint8_t)0xD0U, (uint8_t)0xE4U, (uint8_t)0x0EU, (uint8_t)0x65U, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0x9EU, 0x1EU, 0xF1U, 0x6EU,
+    0x6FU, 0x52U, 0xC3U, 0x16U, 0x4DU, 0xF4U, 0xFBU, 0x79U, 0x30U, 0xE9U, 0xE4U, 0xE5U, 0x88U,
+    0x57U, 0xB6U, 0xACU, 0x7DU, 0x5FU, 0x42U, 0xD6U, 0x9FU, 0x6DU, 0x18U, 0x77U, 0x63U, 0xCFU,
+    0x1DU, 0x55U, 0x03U, 0x40U, 0x04U, 0x87U, 0xF5U, 0x5BU, 0xA5U, 0x7EU, 0x31U, 0xCCU, 0x7AU,
+    0x71U, 0x35U, 0xC8U, 0x86U, 0xEFU, 0xB4U, 0x31U, 0x8AU, 0xEDU, 0x6AU, 0x1EU, 0x01U, 0x2DU,
+    0x9EU, 0x68U, 0x32U, 0xA9U, 0x07U, 0x60U, 0x0AU, 0x91U, 0x81U, 0x30U, 0xC4U, 0x6DU, 0xC7U,
+    0x78U, 0xF9U, 0x71U, 0xADU, 0x00U, 0x38U, 0x09U, 0x29U, 0x99U, 0xA3U, 0x33U, 0xCBU, 0x8BU,
+    0x7AU, 0x1AU, 0x1DU, 0xB9U, 0x3DU, 0x71U, 0x40U, 0x00U, 0x3CU, 0x2AU, 0x4EU, 0xCEU, 0xA9U,
+    0xF9U, 0x8DU, 0x0AU, 0xCCU, 0x0AU, 0x82U, 0x91U, 0xCDU, 0xCEU, 0xC9U, 0x7DU, 0xCFU, 0x8EU,
+    0xC9U, 0xB5U, 0x5AU, 0x7FU, 0x88U, 0xA4U, 0x6BU, 0x4DU, 0xB5U, 0xA8U, 0x51U, 0xF4U, 0x41U,
+    0x82U, 0xE1U, 0xC6U, 0x8AU, 0x00U, 0x7EU, 0x5EU, 0x0DU, 0xD9U, 0x02U, 0x0BU, 0xFDU, 0x64U,
+    0xB6U, 0x45U, 0x03U, 0x6CU, 0x7AU, 0x4EU, 0x67U, 0x7DU, 0x2CU, 0x38U, 0x53U, 0x2AU, 0x3AU,
+    0x23U, 0xBAU, 0x44U, 0x42U, 0xCAU, 0xF5U, 0x3EU, 0xA6U, 0x3BU, 0xB4U, 0x54U, 0x32U, 0x9BU,
+    0x76U, 0x24U, 0xC8U, 0x91U, 0x7BU, 0xDDU, 0x64U, 0xB1U, 0xC0U, 0xFDU, 0x4CU, 0xB3U, 0x8EU,
+    0x8CU, 0x33U, 0x4CU, 0x70U, 0x1CU, 0x3AU, 0xCDU, 0xADU, 0x06U, 0x57U, 0xFCU, 0xCFU, 0xECU,
+    0x71U, 0x9BU, 0x1FU, 0x5CU, 0x3EU, 0x4EU, 0x46U, 0x04U, 0x1FU, 0x38U, 0x81U, 0x47U, 0xFBU,
+    0x4CU, 0xFDU, 0xB4U, 0x77U, 0xA5U, 0x24U, 0x71U, 0xF7U, 0xA9U, 0xA9U, 0x69U, 0x10U, 0xB8U,
+    0x55U, 0x32U, 0x2EU, 0xDBU, 0x63U, 0x40U, 0xD8U, 0xA0U, 0x0EU, 0xF0U, 0x92U, 0x35U, 0x05U,
+    0x11U, 0xE3U, 0x0AU, 0xBEU, 0xC1U, 0xFFU, 0xF9U, 0xE3U, 0xA2U, 0x6EU, 0x7FU, 0xB2U, 0x9FU,
+    0x8CU, 0x18U, 0x30U, 0x23U, 0xC3U, 0x58U, 0x7EU, 0x38U, 0xDAU, 0x00U, 0x77U, 0xD9U, 0xB4U,
+    0x76U, 0x3EU, 0x4EU, 0x4BU, 0x94U, 0xB2U, 0xBBU, 0xC1U, 0x94U, 0xC6U, 0x65U, 0x1EU, 0x77U,
+    0xCAU, 0xF9U, 0x92U, 0xEEU, 0xAAU, 0xC0U, 0x23U, 0x2AU, 0x28U, 0x1BU, 0xF6U, 0xB3U, 0xA7U,
+    0x39U, 0xC1U, 0x22U, 0x61U, 0x16U, 0x82U, 0x0AU, 0xE8U, 0xDBU, 0x58U, 0x47U, 0xA6U, 0x7CU,
+    0xBEU, 0xF9U, 0xC9U, 0x09U, 0x1BU, 0x46U, 0x2DU, 0x53U, 0x8CU, 0xD7U, 0x2BU, 0x03U, 0x74U,
+    0x6AU, 0xE7U, 0x7FU, 0x5EU, 0x62U, 0x29U, 0x2CU, 0x31U, 0x15U, 0x62U, 0xA8U, 0x46U, 0x50U,
+    0x5DU, 0xC8U, 0x2DU, 0xB8U, 0x54U, 0x33U, 0x8AU, 0xE4U, 0x9FU, 0x52U, 0x35U, 0xC9U, 0x5BU,
+    0x91U, 0x17U, 0x8CU, 0xCFU, 0x2DU, 0xD5U, 0xCAU, 0xCEU, 0xF4U, 0x03U, 0xECU, 0x9DU, 0x18U,
+    0x10U, 0xC6U, 0x27U, 0x2BU, 0x04U, 0x5BU, 0x3BU, 0x71U, 0xF9U, 0xDCU, 0x6BU, 0x80U, 0xD6U,
+    0x3FU, 0xDDU, 0x4AU, 0x8EU, 0x9AU, 0xDBU, 0x1EU, 0x69U, 0x62U, 0xA6U, 0x95U, 0x26U, 0xD4U,
+    0x31U, 0x61U, 0xC1U, 0xA4U, 0x1DU, 0x57U, 0x0DU, 0x79U, 0x38U, 0xDAU, 0xD4U, 0xA4U, 0x0EU,
+    0x32U, 0x9CU, 0xD0U, 0xE4U, 0x0EU, 0x65U, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU,
+    0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p8192[1024U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU,
-    (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU,
-    (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U,
-    (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U,
-    (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U,
-    (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U,
-    (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U,
-    (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U,
-    (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU,
-    (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U,
-    (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U,
-    (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U,
-    (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U,
-    (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU,
-    (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U,
-    (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U,
-    (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U,
-    (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU,
-    (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U,
-    (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U,
-    (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU,
-    (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x0DU, (uint8_t)0xD9U, (uint8_t)0x02U,
-    (uint8_t)0x0BU, (uint8_t)0xFDU, (uint8_t)0x64U, (uint8_t)0xB6U, (uint8_t)0x45U, (uint8_t)0x03U,
-    (uint8_t)0x6CU, (uint8_t)0x7AU, (uint8_t)0x4EU, (uint8_t)0x67U, (uint8_t)0x7DU, (uint8_t)0x2CU,
-    (uint8_t)0x38U, (uint8_t)0x53U, (uint8_t)0x2AU, (uint8_t)0x3AU, (uint8_t)0x23U, (uint8_t)0xBAU,
-    (uint8_t)0x44U, (uint8_t)0x42U, (uint8_t)0xCAU, (uint8_t)0xF5U, (uint8_t)0x3EU, (uint8_t)0xA6U,
-    (uint8_t)0x3BU, (uint8_t)0xB4U, (uint8_t)0x54U, (uint8_t)0x32U, (uint8_t)0x9BU, (uint8_t)0x76U,
-    (uint8_t)0x24U, (uint8_t)0xC8U, (uint8_t)0x91U, (uint8_t)0x7BU, (uint8_t)0xDDU, (uint8_t)0x64U,
-    (uint8_t)0xB1U, (uint8_t)0xC0U, (uint8_t)0xFDU, (uint8_t)0x4CU, (uint8_t)0xB3U, (uint8_t)0x8EU,
-    (uint8_t)0x8CU, (uint8_t)0x33U, (uint8_t)0x4CU, (uint8_t)0x70U, (uint8_t)0x1CU, (uint8_t)0x3AU,
-    (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x06U, (uint8_t)0x57U, (uint8_t)0xFCU, (uint8_t)0xCFU,
-    (uint8_t)0xECU, (uint8_t)0x71U, (uint8_t)0x9BU, (uint8_t)0x1FU, (uint8_t)0x5CU, (uint8_t)0x3EU,
-    (uint8_t)0x4EU, (uint8_t)0x46U, (uint8_t)0x04U, (uint8_t)0x1FU, (uint8_t)0x38U, (uint8_t)0x81U,
-    (uint8_t)0x47U, (uint8_t)0xFBU, (uint8_t)0x4CU, (uint8_t)0xFDU, (uint8_t)0xB4U, (uint8_t)0x77U,
-    (uint8_t)0xA5U, (uint8_t)0x24U, (uint8_t)0x71U, (uint8_t)0xF7U, (uint8_t)0xA9U, (uint8_t)0xA9U,
-    (uint8_t)0x69U, (uint8_t)0x10U, (uint8_t)0xB8U, (uint8_t)0x55U, (uint8_t)0x32U, (uint8_t)0x2EU,
-    (uint8_t)0xDBU, (uint8_t)0x63U, (uint8_t)0x40U, (uint8_t)0xD8U, (uint8_t)0xA0U, (uint8_t)0x0EU,
-    (uint8_t)0xF0U, (uint8_t)0x92U, (uint8_t)0x35U, (uint8_t)0x05U, (uint8_t)0x11U, (uint8_t)0xE3U,
-    (uint8_t)0x0AU, (uint8_t)0xBEU, (uint8_t)0xC1U, (uint8_t)0xFFU, (uint8_t)0xF9U, (uint8_t)0xE3U,
-    (uint8_t)0xA2U, (uint8_t)0x6EU, (uint8_t)0x7FU, (uint8_t)0xB2U, (uint8_t)0x9FU, (uint8_t)0x8CU,
-    (uint8_t)0x18U, (uint8_t)0x30U, (uint8_t)0x23U, (uint8_t)0xC3U, (uint8_t)0x58U, (uint8_t)0x7EU,
-    (uint8_t)0x38U, (uint8_t)0xDAU, (uint8_t)0x00U, (uint8_t)0x77U, (uint8_t)0xD9U, (uint8_t)0xB4U,
-    (uint8_t)0x76U, (uint8_t)0x3EU, (uint8_t)0x4EU, (uint8_t)0x4BU, (uint8_t)0x94U, (uint8_t)0xB2U,
-    (uint8_t)0xBBU, (uint8_t)0xC1U, (uint8_t)0x94U, (uint8_t)0xC6U, (uint8_t)0x65U, (uint8_t)0x1EU,
-    (uint8_t)0x77U, (uint8_t)0xCAU, (uint8_t)0xF9U, (uint8_t)0x92U, (uint8_t)0xEEU, (uint8_t)0xAAU,
-    (uint8_t)0xC0U, (uint8_t)0x23U, (uint8_t)0x2AU, (uint8_t)0x28U, (uint8_t)0x1BU, (uint8_t)0xF6U,
-    (uint8_t)0xB3U, (uint8_t)0xA7U, (uint8_t)0x39U, (uint8_t)0xC1U, (uint8_t)0x22U, (uint8_t)0x61U,
-    (uint8_t)0x16U, (uint8_t)0x82U, (uint8_t)0x0AU, (uint8_t)0xE8U, (uint8_t)0xDBU, (uint8_t)0x58U,
-    (uint8_t)0x47U, (uint8_t)0xA6U, (uint8_t)0x7CU, (uint8_t)0xBEU, (uint8_t)0xF9U, (uint8_t)0xC9U,
-    (uint8_t)0x09U, (uint8_t)0x1BU, (uint8_t)0x46U, (uint8_t)0x2DU, (uint8_t)0x53U, (uint8_t)0x8CU,
-    (uint8_t)0xD7U, (uint8_t)0x2BU, (uint8_t)0x03U, (uint8_t)0x74U, (uint8_t)0x6AU, (uint8_t)0xE7U,
-    (uint8_t)0x7FU, (uint8_t)0x5EU, (uint8_t)0x62U, (uint8_t)0x29U, (uint8_t)0x2CU, (uint8_t)0x31U,
-    (uint8_t)0x15U, (uint8_t)0x62U, (uint8_t)0xA8U, (uint8_t)0x46U, (uint8_t)0x50U, (uint8_t)0x5DU,
-    (uint8_t)0xC8U, (uint8_t)0x2DU, (uint8_t)0xB8U, (uint8_t)0x54U, (uint8_t)0x33U, (uint8_t)0x8AU,
-    (uint8_t)0xE4U, (uint8_t)0x9FU, (uint8_t)0x52U, (uint8_t)0x35U, (uint8_t)0xC9U, (uint8_t)0x5BU,
-    (uint8_t)0x91U, (uint8_t)0x17U, (uint8_t)0x8CU, (uint8_t)0xCFU, (uint8_t)0x2DU, (uint8_t)0xD5U,
-    (uint8_t)0xCAU, (uint8_t)0xCEU, (uint8_t)0xF4U, (uint8_t)0x03U, (uint8_t)0xECU, (uint8_t)0x9DU,
-    (uint8_t)0x18U, (uint8_t)0x10U, (uint8_t)0xC6U, (uint8_t)0x27U, (uint8_t)0x2BU, (uint8_t)0x04U,
-    (uint8_t)0x5BU, (uint8_t)0x3BU, (uint8_t)0x71U, (uint8_t)0xF9U, (uint8_t)0xDCU, (uint8_t)0x6BU,
-    (uint8_t)0x80U, (uint8_t)0xD6U, (uint8_t)0x3FU, (uint8_t)0xDDU, (uint8_t)0x4AU, (uint8_t)0x8EU,
-    (uint8_t)0x9AU, (uint8_t)0xDBU, (uint8_t)0x1EU, (uint8_t)0x69U, (uint8_t)0x62U, (uint8_t)0xA6U,
-    (uint8_t)0x95U, (uint8_t)0x26U, (uint8_t)0xD4U, (uint8_t)0x31U, (uint8_t)0x61U, (uint8_t)0xC1U,
-    (uint8_t)0xA4U, (uint8_t)0x1DU, (uint8_t)0x57U, (uint8_t)0x0DU, (uint8_t)0x79U, (uint8_t)0x38U,
-    (uint8_t)0xDAU, (uint8_t)0xD4U, (uint8_t)0xA4U, (uint8_t)0x0EU, (uint8_t)0x32U, (uint8_t)0x9CU,
-    (uint8_t)0xCFU, (uint8_t)0xF4U, (uint8_t)0x6AU, (uint8_t)0xAAU, (uint8_t)0x36U, (uint8_t)0xADU,
-    (uint8_t)0x00U, (uint8_t)0x4CU, (uint8_t)0xF6U, (uint8_t)0x00U, (uint8_t)0xC8U, (uint8_t)0x38U,
-    (uint8_t)0x1EU, (uint8_t)0x42U, (uint8_t)0x5AU, (uint8_t)0x31U, (uint8_t)0xD9U, (uint8_t)0x51U,
-    (uint8_t)0xAEU, (uint8_t)0x64U, (uint8_t)0xFDU, (uint8_t)0xB2U, (uint8_t)0x3FU, (uint8_t)0xCEU,
-    (uint8_t)0xC9U, (uint8_t)0x50U, (uint8_t)0x9DU, (uint8_t)0x43U, (uint8_t)0x68U, (uint8_t)0x7FU,
-    (uint8_t)0xEBU, (uint8_t)0x69U, (uint8_t)0xEDU, (uint8_t)0xD1U, (uint8_t)0xCCU, (uint8_t)0x5EU,
-    (uint8_t)0x0BU, (uint8_t)0x8CU, (uint8_t)0xC3U, (uint8_t)0xBDU, (uint8_t)0xF6U, (uint8_t)0x4BU,
-    (uint8_t)0x10U, (uint8_t)0xEFU, (uint8_t)0x86U, (uint8_t)0xB6U, (uint8_t)0x31U, (uint8_t)0x42U,
-    (uint8_t)0xA3U, (uint8_t)0xABU, (uint8_t)0x88U, (uint8_t)0x29U, (uint8_t)0x55U, (uint8_t)0x5BU,
-    (uint8_t)0x2FU, (uint8_t)0x74U, (uint8_t)0x7CU, (uint8_t)0x93U, (uint8_t)0x26U, (uint8_t)0x65U,
-    (uint8_t)0xCBU, (uint8_t)0x2CU, (uint8_t)0x0FU, (uint8_t)0x1CU, (uint8_t)0xC0U, (uint8_t)0x1BU,
-    (uint8_t)0xD7U, (uint8_t)0x02U, (uint8_t)0x29U, (uint8_t)0x38U, (uint8_t)0x88U, (uint8_t)0x39U,
-    (uint8_t)0xD2U, (uint8_t)0xAFU, (uint8_t)0x05U, (uint8_t)0xE4U, (uint8_t)0x54U, (uint8_t)0x50U,
-    (uint8_t)0x4AU, (uint8_t)0xC7U, (uint8_t)0x8BU, (uint8_t)0x75U, (uint8_t)0x82U, (uint8_t)0x82U,
-    (uint8_t)0x28U, (uint8_t)0x46U, (uint8_t)0xC0U, (uint8_t)0xBAU, (uint8_t)0x35U, (uint8_t)0xC3U,
-    (uint8_t)0x5FU, (uint8_t)0x5CU, (uint8_t)0x59U, (uint8_t)0x16U, (uint8_t)0x0CU, (uint8_t)0xC0U,
-    (uint8_t)0x46U, (uint8_t)0xFDU, (uint8_t)0x82U, (uint8_t)0x51U, (uint8_t)0x54U, (uint8_t)0x1FU,
-    (uint8_t)0xC6U, (uint8_t)0x8CU, (uint8_t)0x9CU, (uint8_t)0x86U, (uint8_t)0xB0U, (uint8_t)0x22U,
-    (uint8_t)0xBBU, (uint8_t)0x70U, (uint8_t)0x99U, (uint8_t)0x87U, (uint8_t)0x6AU, (uint8_t)0x46U,
-    (uint8_t)0x0EU, (uint8_t)0x74U, (uint8_t)0x51U, (uint8_t)0xA8U, (uint8_t)0xA9U, (uint8_t)0x31U,
-    (uint8_t)0x09U, (uint8_t)0x70U, (uint8_t)0x3FU, (uint8_t)0xEEU, (uint8_t)0x1CU, (uint8_t)0x21U,
-    (uint8_t)0x7EU, (uint8_t)0x6CU, (uint8_t)0x38U, (uint8_t)0x26U, (uint8_t)0xE5U, (uint8_t)0x2CU,
-    (uint8_t)0x51U, (uint8_t)0xAAU, (uint8_t)0x69U, (uint8_t)0x1EU, (uint8_t)0x0EU, (uint8_t)0x42U,
-    (uint8_t)0x3CU, (uint8_t)0xFCU, (uint8_t)0x99U, (uint8_t)0xE9U, (uint8_t)0xE3U, (uint8_t)0x16U,
-    (uint8_t)0x50U, (uint8_t)0xC1U, (uint8_t)0x21U, (uint8_t)0x7BU, (uint8_t)0x62U, (uint8_t)0x48U,
-    (uint8_t)0x16U, (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x9AU, (uint8_t)0x95U, (uint8_t)0xF9U,
-    (uint8_t)0xD5U, (uint8_t)0xB8U, (uint8_t)0x01U, (uint8_t)0x94U, (uint8_t)0x88U, (uint8_t)0xD9U,
-    (uint8_t)0xC0U, (uint8_t)0xA0U, (uint8_t)0xA1U, (uint8_t)0xFEU, (uint8_t)0x30U, (uint8_t)0x75U,
-    (uint8_t)0xA5U, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0x31U, (uint8_t)0x83U, (uint8_t)0xF8U,
-    (uint8_t)0x1DU, (uint8_t)0x4AU, (uint8_t)0x3FU, (uint8_t)0x2FU, (uint8_t)0xA4U, (uint8_t)0x57U,
-    (uint8_t)0x1EU, (uint8_t)0xFCU, (uint8_t)0x8CU, (uint8_t)0xE0U, (uint8_t)0xBAU, (uint8_t)0x8AU,
-    (uint8_t)0x4FU, (uint8_t)0xE8U, (uint8_t)0xB6U, (uint8_t)0x85U, (uint8_t)0x5DU, (uint8_t)0xFEU,
-    (uint8_t)0x72U, (uint8_t)0xB0U, (uint8_t)0xA6U, (uint8_t)0x6EU, (uint8_t)0xDEU, (uint8_t)0xD2U,
-    (uint8_t)0xFBU, (uint8_t)0xABU, (uint8_t)0xFBU, (uint8_t)0xE5U, (uint8_t)0x8AU, (uint8_t)0x30U,
-    (uint8_t)0xFAU, (uint8_t)0xFAU, (uint8_t)0xBEU, (uint8_t)0x1CU, (uint8_t)0x5DU, (uint8_t)0x71U,
-    (uint8_t)0xA8U, (uint8_t)0x7EU, (uint8_t)0x2FU, (uint8_t)0x74U, (uint8_t)0x1EU, (uint8_t)0xF8U,
-    (uint8_t)0xC1U, (uint8_t)0xFEU, (uint8_t)0x86U, (uint8_t)0xFEU, (uint8_t)0xA6U, (uint8_t)0xBBU,
-    (uint8_t)0xFDU, (uint8_t)0xE5U, (uint8_t)0x30U, (uint8_t)0x67U, (uint8_t)0x7FU, (uint8_t)0x0DU,
-    (uint8_t)0x97U, (uint8_t)0xD1U, (uint8_t)0x1DU, (uint8_t)0x49U, (uint8_t)0xF7U, (uint8_t)0xA8U,
-    (uint8_t)0x44U, (uint8_t)0x3DU, (uint8_t)0x08U, (uint8_t)0x22U, (uint8_t)0xE5U, (uint8_t)0x06U,
-    (uint8_t)0xA9U, (uint8_t)0xF4U, (uint8_t)0x61U, (uint8_t)0x4EU, (uint8_t)0x01U, (uint8_t)0x1EU,
-    (uint8_t)0x2AU, (uint8_t)0x94U, (uint8_t)0x83U, (uint8_t)0x8FU, (uint8_t)0xF8U, (uint8_t)0x8CU,
-    (uint8_t)0xD6U, (uint8_t)0x8CU, (uint8_t)0x8BU, (uint8_t)0xB7U, (uint8_t)0xC5U, (uint8_t)0xC6U,
-    (uint8_t)0x42U, (uint8_t)0x4CU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0x9EU, 0x1EU, 0xF1U, 0x6EU,
+    0x6FU, 0x52U, 0xC3U, 0x16U, 0x4DU, 0xF4U, 0xFBU, 0x79U, 0x30U, 0xE9U, 0xE4U, 0xE5U, 0x88U,
+    0x57U, 0xB6U, 0xACU, 0x7DU, 0x5FU, 0x42U, 0xD6U, 0x9FU, 0x6DU, 0x18U, 0x77U, 0x63U, 0xCFU,
+    0x1DU, 0x55U, 0x03U, 0x40U, 0x04U, 0x87U, 0xF5U, 0x5BU, 0xA5U, 0x7EU, 0x31U, 0xCCU, 0x7AU,
+    0x71U, 0x35U, 0xC8U, 0x86U, 0xEFU, 0xB4U, 0x31U, 0x8AU, 0xEDU, 0x6AU, 0x1EU, 0x01U, 0x2DU,
+    0x9EU, 0x68U, 0x32U, 0xA9U, 0x07U, 0x60U, 0x0AU, 0x91U, 0x81U, 0x30U, 0xC4U, 0x6DU, 0xC7U,
+    0x78U, 0xF9U, 0x71U, 0xADU, 0x00U, 0x38U, 0x09U, 0x29U, 0x99U, 0xA3U, 0x33U, 0xCBU, 0x8BU,
+    0x7AU, 0x1AU, 0x1DU, 0xB9U, 0x3DU, 0x71U, 0x40U, 0x00U, 0x3CU, 0x2AU, 0x4EU, 0xCEU, 0xA9U,
+    0xF9U, 0x8DU, 0x0AU, 0xCCU, 0x0AU, 0x82U, 0x91U, 0xCDU, 0xCEU, 0xC9U, 0x7DU, 0xCFU, 0x8EU,
+    0xC9U, 0xB5U, 0x5AU, 0x7FU, 0x88U, 0xA4U, 0x6BU, 0x4DU, 0xB5U, 0xA8U, 0x51U, 0xF4U, 0x41U,
+    0x82U, 0xE1U, 0xC6U, 0x8AU, 0x00U, 0x7EU, 0x5EU, 0x0DU, 0xD9U, 0x02U, 0x0BU, 0xFDU, 0x64U,
+    0xB6U, 0x45U, 0x03U, 0x6CU, 0x7AU, 0x4EU, 0x67U, 0x7DU, 0x2CU, 0x38U, 0x53U, 0x2AU, 0x3AU,
+    0x23U, 0xBAU, 0x44U, 0x42U, 0xCAU, 0xF5U, 0x3EU, 0xA6U, 0x3BU, 0xB4U, 0x54U, 0x32U, 0x9BU,
+    0x76U, 0x24U, 0xC8U, 0x91U, 0x7BU, 0xDDU, 0x64U, 0xB1U, 0xC0U, 0xFDU, 0x4CU, 0xB3U, 0x8EU,
+    0x8CU, 0x33U, 0x4CU, 0x70U, 0x1CU, 0x3AU, 0xCDU, 0xADU, 0x06U, 0x57U, 0xFCU, 0xCFU, 0xECU,
+    0x71U, 0x9BU, 0x1FU, 0x5CU, 0x3EU, 0x4EU, 0x46U, 0x04U, 0x1FU, 0x38U, 0x81U, 0x47U, 0xFBU,
+    0x4CU, 0xFDU, 0xB4U, 0x77U, 0xA5U, 0x24U, 0x71U, 0xF7U, 0xA9U, 0xA9U, 0x69U, 0x10U, 0xB8U,
+    0x55U, 0x32U, 0x2EU, 0xDBU, 0x63U, 0x40U, 0xD8U, 0xA0U, 0x0EU, 0xF0U, 0x92U, 0x35U, 0x05U,
+    0x11U, 0xE3U, 0x0AU, 0xBEU, 0xC1U, 0xFFU, 0xF9U, 0xE3U, 0xA2U, 0x6EU, 0x7FU, 0xB2U, 0x9FU,
+    0x8CU, 0x18U, 0x30U, 0x23U, 0xC3U, 0x58U, 0x7EU, 0x38U, 0xDAU, 0x00U, 0x77U, 0xD9U, 0xB4U,
+    0x76U, 0x3EU, 0x4EU, 0x4BU, 0x94U, 0xB2U, 0xBBU, 0xC1U, 0x94U, 0xC6U, 0x65U, 0x1EU, 0x77U,
+    0xCAU, 0xF9U, 0x92U, 0xEEU, 0xAAU, 0xC0U, 0x23U, 0x2AU, 0x28U, 0x1BU, 0xF6U, 0xB3U, 0xA7U,
+    0x39U, 0xC1U, 0x22U, 0x61U, 0x16U, 0x82U, 0x0AU, 0xE8U, 0xDBU, 0x58U, 0x47U, 0xA6U, 0x7CU,
+    0xBEU, 0xF9U, 0xC9U, 0x09U, 0x1BU, 0x46U, 0x2DU, 0x53U, 0x8CU, 0xD7U, 0x2BU, 0x03U, 0x74U,
+    0x6AU, 0xE7U, 0x7FU, 0x5EU, 0x62U, 0x29U, 0x2CU, 0x31U, 0x15U, 0x62U, 0xA8U, 0x46U, 0x50U,
+    0x5DU, 0xC8U, 0x2DU, 0xB8U, 0x54U, 0x33U, 0x8AU, 0xE4U, 0x9FU, 0x52U, 0x35U, 0xC9U, 0x5BU,
+    0x91U, 0x17U, 0x8CU, 0xCFU, 0x2DU, 0xD5U, 0xCAU, 0xCEU, 0xF4U, 0x03U, 0xECU, 0x9DU, 0x18U,
+    0x10U, 0xC6U, 0x27U, 0x2BU, 0x04U, 0x5BU, 0x3BU, 0x71U, 0xF9U, 0xDCU, 0x6BU, 0x80U, 0xD6U,
+    0x3FU, 0xDDU, 0x4AU, 0x8EU, 0x9AU, 0xDBU, 0x1EU, 0x69U, 0x62U, 0xA6U, 0x95U, 0x26U, 0xD4U,
+    0x31U, 0x61U, 0xC1U, 0xA4U, 0x1DU, 0x57U, 0x0DU, 0x79U, 0x38U, 0xDAU, 0xD4U, 0xA4U, 0x0EU,
+    0x32U, 0x9CU, 0xCFU, 0xF4U, 0x6AU, 0xAAU, 0x36U, 0xADU, 0x00U, 0x4CU, 0xF6U, 0x00U, 0xC8U,
+    0x38U, 0x1EU, 0x42U, 0x5AU, 0x31U, 0xD9U, 0x51U, 0xAEU, 0x64U, 0xFDU, 0xB2U, 0x3FU, 0xCEU,
+    0xC9U, 0x50U, 0x9DU, 0x43U, 0x68U, 0x7FU, 0xEBU, 0x69U, 0xEDU, 0xD1U, 0xCCU, 0x5EU, 0x0BU,
+    0x8CU, 0xC3U, 0xBDU, 0xF6U, 0x4BU, 0x10U, 0xEFU, 0x86U, 0xB6U, 0x31U, 0x42U, 0xA3U, 0xABU,
+    0x88U, 0x29U, 0x55U, 0x5BU, 0x2FU, 0x74U, 0x7CU, 0x93U, 0x26U, 0x65U, 0xCBU, 0x2CU, 0x0FU,
+    0x1CU, 0xC0U, 0x1BU, 0xD7U, 0x02U, 0x29U, 0x38U, 0x88U, 0x39U, 0xD2U, 0xAFU, 0x05U, 0xE4U,
+    0x54U, 0x50U, 0x4AU, 0xC7U, 0x8BU, 0x75U, 0x82U, 0x82U, 0x28U, 0x46U, 0xC0U, 0xBAU, 0x35U,
+    0xC3U, 0x5FU, 0x5CU, 0x59U, 0x16U, 0x0CU, 0xC0U, 0x46U, 0xFDU, 0x82U, 0x51U, 0x54U, 0x1FU,
+    0xC6U, 0x8CU, 0x9CU, 0x86U, 0xB0U, 0x22U, 0xBBU, 0x70U, 0x99U, 0x87U, 0x6AU, 0x46U, 0x0EU,
+    0x74U, 0x51U, 0xA8U, 0xA9U, 0x31U, 0x09U, 0x70U, 0x3FU, 0xEEU, 0x1CU, 0x21U, 0x7EU, 0x6CU,
+    0x38U, 0x26U, 0xE5U, 0x2CU, 0x51U, 0xAAU, 0x69U, 0x1EU, 0x0EU, 0x42U, 0x3CU, 0xFCU, 0x99U,
+    0xE9U, 0xE3U, 0x16U, 0x50U, 0xC1U, 0x21U, 0x7BU, 0x62U, 0x48U, 0x16U, 0xCDU, 0xADU, 0x9AU,
+    0x95U, 0xF9U, 0xD5U, 0xB8U, 0x01U, 0x94U, 0x88U, 0xD9U, 0xC0U, 0xA0U, 0xA1U, 0xFEU, 0x30U,
+    0x75U, 0xA5U, 0x77U, 0xE2U, 0x31U, 0x83U, 0xF8U, 0x1DU, 0x4AU, 0x3FU, 0x2FU, 0xA4U, 0x57U,
+    0x1EU, 0xFCU, 0x8CU, 0xE0U, 0xBAU, 0x8AU, 0x4FU, 0xE8U, 0xB6U, 0x85U, 0x5DU, 0xFEU, 0x72U,
+    0xB0U, 0xA6U, 0x6EU, 0xDEU, 0xD2U, 0xFBU, 0xABU, 0xFBU, 0xE5U, 0x8AU, 0x30U, 0xFAU, 0xFAU,
+    0xBEU, 0x1CU, 0x5DU, 0x71U, 0xA8U, 0x7EU, 0x2FU, 0x74U, 0x1EU, 0xF8U, 0xC1U, 0xFEU, 0x86U,
+    0xFEU, 0xA6U, 0xBBU, 0xFDU, 0xE5U, 0x30U, 0x67U, 0x7FU, 0x0DU, 0x97U, 0xD1U, 0x1DU, 0x49U,
+    0xF7U, 0xA8U, 0x44U, 0x3DU, 0x08U, 0x22U, 0xE5U, 0x06U, 0xA9U, 0xF4U, 0x61U, 0x4EU, 0x01U,
+    0x1EU, 0x2AU, 0x94U, 0x83U, 0x8FU, 0xF8U, 0x8CU, 0xD6U, 0x8CU, 0x8BU, 0xB7U, 0xC5U, 0xC6U,
+    0x42U, 0x4CU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 #if defined(__cplusplus)
diff --git a/include/msvc/internal/Hacl_K256_PrecompTable.h b/include/msvc/internal/Hacl_K256_PrecompTable.h
index 26bdfa1f..ff15f1c9 100644
--- a/include/msvc/internal/Hacl_K256_PrecompTable.h
+++ b/include/msvc/internal/Hacl_K256_PrecompTable.h
@@ -39,498 +39,378 @@ static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_basepoint_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)705178180786072U,
-    (uint64_t)3855836460717471U, (uint64_t)4089131105950716U, (uint64_t)3301581525494108U,
-    (uint64_t)133858670344668U, (uint64_t)2199641648059576U, (uint64_t)1278080618437060U,
-    (uint64_t)3959378566518708U, (uint64_t)3455034269351872U, (uint64_t)79417610544803U,
-    (uint64_t)1U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)1282049064345544U, (uint64_t)971732600440099U, (uint64_t)1014594595727339U,
-    (uint64_t)4392159187541980U, (uint64_t)268327875692285U, (uint64_t)2411661712280539U,
-    (uint64_t)1092576199280126U, (uint64_t)4328619610718051U, (uint64_t)3535440816471627U,
-    (uint64_t)95182251488556U, (uint64_t)1893725512243753U, (uint64_t)3619861457111820U,
-    (uint64_t)879374960417905U, (uint64_t)2868056058129113U, (uint64_t)273195291893682U,
-    (uint64_t)2044797305960112U, (uint64_t)2357106853933780U, (uint64_t)3563112438336058U,
-    (uint64_t)2430811541762558U, (uint64_t)106443809495428U, (uint64_t)2231357633909668U,
-    (uint64_t)3641705835951936U, (uint64_t)80642569314189U, (uint64_t)2254841882373268U,
-    (uint64_t)149848031966573U, (uint64_t)2304615661367764U, (uint64_t)2410957403736446U,
-    (uint64_t)2712754805859804U, (uint64_t)2440183877540536U, (uint64_t)99784623895865U,
-    (uint64_t)3667773127482758U, (uint64_t)1354899394473308U, (uint64_t)3636602998800808U,
-    (uint64_t)2709296679846364U, (uint64_t)7253362091963U, (uint64_t)3585950735562744U,
-    (uint64_t)935775991758415U, (uint64_t)4108078106735201U, (uint64_t)556081800336307U,
-    (uint64_t)229585977163057U, (uint64_t)4055594186679801U, (uint64_t)1767681004944933U,
-    (uint64_t)1432634922083242U, (uint64_t)534935602949197U, (uint64_t)251753159522567U,
-    (uint64_t)2846474078499321U, (uint64_t)4488649590348702U, (uint64_t)2437476916025038U,
-    (uint64_t)3040577412822874U, (uint64_t)79405234918614U, (uint64_t)3030621226551508U,
-    (uint64_t)2801117003929806U, (uint64_t)1642927515498422U, (uint64_t)2802725079726297U,
-    (uint64_t)8472780626107U, (uint64_t)866068070352655U, (uint64_t)188080768545106U,
-    (uint64_t)2152119998903058U, (uint64_t)3391239985029665U, (uint64_t)23820026013564U,
-    (uint64_t)2965064154891949U, (uint64_t)1846516097921398U, (uint64_t)4418379948133146U,
-    (uint64_t)3137755426942400U, (uint64_t)47705291301781U, (uint64_t)4278533051105665U,
-    (uint64_t)3453643211214931U, (uint64_t)3379734319145156U, (uint64_t)3762442192097039U,
-    (uint64_t)40243003528694U, (uint64_t)4063448994211201U, (uint64_t)5697015368785U,
-    (uint64_t)1006545411838613U, (uint64_t)4242291693755210U, (uint64_t)135184629190512U,
-    (uint64_t)264898689131035U, (uint64_t)611796474823597U, (uint64_t)3255382250029089U,
-    (uint64_t)3490429246984696U, (uint64_t)236558595864362U, (uint64_t)2055934691551704U,
-    (uint64_t)1487711670114502U, (uint64_t)1823930698221632U, (uint64_t)2130937287438472U,
-    (uint64_t)154610053389779U, (uint64_t)2746573287023216U, (uint64_t)2430987262221221U,
-    (uint64_t)1668741642878689U, (uint64_t)904982541243977U, (uint64_t)56087343124948U,
-    (uint64_t)393905062353536U, (uint64_t)412681877350188U, (uint64_t)3153602040979977U,
-    (uint64_t)4466820876224989U, (uint64_t)146579165617857U, (uint64_t)2628741216508991U,
-    (uint64_t)747994231529806U, (uint64_t)750506569317681U, (uint64_t)1887492790748779U,
-    (uint64_t)35259008682771U, (uint64_t)2085116434894208U, (uint64_t)543291398921711U,
-    (uint64_t)1144362007901552U, (uint64_t)679305136036846U, (uint64_t)141090902244489U,
-    (uint64_t)632480954474859U, (uint64_t)2384513102652591U, (uint64_t)2225529790159790U,
-    (uint64_t)692258664851625U, (uint64_t)198681843567699U, (uint64_t)2397092587228181U,
-    (uint64_t)145862822166614U, (uint64_t)196976540479452U, (uint64_t)3321831130141455U,
-    (uint64_t)69266673089832U, (uint64_t)4469644227342284U, (uint64_t)3899271145504796U,
-    (uint64_t)1261890974076660U, (uint64_t)525357673886694U, (uint64_t)182135997828583U,
-    (uint64_t)4292760618810332U, (uint64_t)3404186545541683U, (uint64_t)312297386688768U,
-    (uint64_t)204377466824608U, (uint64_t)230900767857952U, (uint64_t)3871485172339693U,
-    (uint64_t)779449329662955U, (uint64_t)978655822464694U, (uint64_t)2278252139594027U,
-    (uint64_t)104641527040382U, (uint64_t)3528840153625765U, (uint64_t)4484699080275273U,
-    (uint64_t)1463971951102316U, (uint64_t)4013910812844749U, (uint64_t)228915589433620U,
-    (uint64_t)1209641433482461U, (uint64_t)4043178788774759U, (uint64_t)3008668238856634U,
-    (uint64_t)1448425089071412U, (uint64_t)26269719725037U, (uint64_t)3330785027545223U,
-    (uint64_t)852657975349259U, (uint64_t)227245054466105U, (uint64_t)1534632353984777U,
-    (uint64_t)207715098574660U, (uint64_t)3209837527352280U, (uint64_t)4051688046309066U,
-    (uint64_t)3839009590725955U, (uint64_t)1321506437398842U, (uint64_t)68340219159928U,
-    (uint64_t)1806950276956275U, (uint64_t)3923908055275295U, (uint64_t)743963253393575U,
-    (uint64_t)42162407478783U, (uint64_t)261334584474610U, (uint64_t)3728224928885214U,
-    (uint64_t)4004701081842869U, (uint64_t)709043201644674U, (uint64_t)4267294249150171U,
-    (uint64_t)255540582975025U, (uint64_t)875490593722211U, (uint64_t)796393708218375U,
-    (uint64_t)14774425627956U, (uint64_t)1500040516752097U, (uint64_t)141076627721678U,
-    (uint64_t)2634539368480628U, (uint64_t)1106488853550103U, (uint64_t)2346231921151930U,
-    (uint64_t)897108283954283U, (uint64_t)64616679559843U, (uint64_t)400244949840943U,
-    (uint64_t)1731263826831733U, (uint64_t)1649996579904651U, (uint64_t)3643693449640761U,
-    (uint64_t)172543068638991U, (uint64_t)329537981097182U, (uint64_t)2029799860802869U,
-    (uint64_t)4377737515208862U, (uint64_t)29103311051334U, (uint64_t)265583594111499U,
-    (uint64_t)3798074876561255U, (uint64_t)184749333259352U, (uint64_t)3117395073661801U,
-    (uint64_t)3695784565008833U, (uint64_t)64282709896721U, (uint64_t)1618968913246422U,
-    (uint64_t)3185235128095257U, (uint64_t)3288745068118692U, (uint64_t)1963818603508782U,
-    (uint64_t)281054350739495U, (uint64_t)1658639050810346U, (uint64_t)3061097601679552U,
-    (uint64_t)3023781433263746U, (uint64_t)2770283391242475U, (uint64_t)144508864751908U,
-    (uint64_t)173576288079856U, (uint64_t)46114579547054U, (uint64_t)1679480127300211U,
-    (uint64_t)1683062051644007U, (uint64_t)117183826129323U, (uint64_t)1894068608117440U,
-    (uint64_t)3846899838975733U, (uint64_t)4289279019496192U, (uint64_t)176995887914031U,
-    (uint64_t)78074942938713U, (uint64_t)454207263265292U, (uint64_t)972683614054061U,
-    (uint64_t)808474205144361U, (uint64_t)942703935951735U, (uint64_t)134460241077887U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    705178180786072ULL, 3855836460717471ULL, 4089131105950716ULL, 3301581525494108ULL,
+    133858670344668ULL, 2199641648059576ULL, 1278080618437060ULL, 3959378566518708ULL,
+    3455034269351872ULL, 79417610544803ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1282049064345544ULL,
+    971732600440099ULL, 1014594595727339ULL, 4392159187541980ULL, 268327875692285ULL,
+    2411661712280539ULL, 1092576199280126ULL, 4328619610718051ULL, 3535440816471627ULL,
+    95182251488556ULL, 1893725512243753ULL, 3619861457111820ULL, 879374960417905ULL,
+    2868056058129113ULL, 273195291893682ULL, 2044797305960112ULL, 2357106853933780ULL,
+    3563112438336058ULL, 2430811541762558ULL, 106443809495428ULL, 2231357633909668ULL,
+    3641705835951936ULL, 80642569314189ULL, 2254841882373268ULL, 149848031966573ULL,
+    2304615661367764ULL, 2410957403736446ULL, 2712754805859804ULL, 2440183877540536ULL,
+    99784623895865ULL, 3667773127482758ULL, 1354899394473308ULL, 3636602998800808ULL,
+    2709296679846364ULL, 7253362091963ULL, 3585950735562744ULL, 935775991758415ULL,
+    4108078106735201ULL, 556081800336307ULL, 229585977163057ULL, 4055594186679801ULL,
+    1767681004944933ULL, 1432634922083242ULL, 534935602949197ULL, 251753159522567ULL,
+    2846474078499321ULL, 4488649590348702ULL, 2437476916025038ULL, 3040577412822874ULL,
+    79405234918614ULL, 3030621226551508ULL, 2801117003929806ULL, 1642927515498422ULL,
+    2802725079726297ULL, 8472780626107ULL, 866068070352655ULL, 188080768545106ULL,
+    2152119998903058ULL, 3391239985029665ULL, 23820026013564ULL, 2965064154891949ULL,
+    1846516097921398ULL, 4418379948133146ULL, 3137755426942400ULL, 47705291301781ULL,
+    4278533051105665ULL, 3453643211214931ULL, 3379734319145156ULL, 3762442192097039ULL,
+    40243003528694ULL, 4063448994211201ULL, 5697015368785ULL, 1006545411838613ULL,
+    4242291693755210ULL, 135184629190512ULL, 264898689131035ULL, 611796474823597ULL,
+    3255382250029089ULL, 3490429246984696ULL, 236558595864362ULL, 2055934691551704ULL,
+    1487711670114502ULL, 1823930698221632ULL, 2130937287438472ULL, 154610053389779ULL,
+    2746573287023216ULL, 2430987262221221ULL, 1668741642878689ULL, 904982541243977ULL,
+    56087343124948ULL, 393905062353536ULL, 412681877350188ULL, 3153602040979977ULL,
+    4466820876224989ULL, 146579165617857ULL, 2628741216508991ULL, 747994231529806ULL,
+    750506569317681ULL, 1887492790748779ULL, 35259008682771ULL, 2085116434894208ULL,
+    543291398921711ULL, 1144362007901552ULL, 679305136036846ULL, 141090902244489ULL,
+    632480954474859ULL, 2384513102652591ULL, 2225529790159790ULL, 692258664851625ULL,
+    198681843567699ULL, 2397092587228181ULL, 145862822166614ULL, 196976540479452ULL,
+    3321831130141455ULL, 69266673089832ULL, 4469644227342284ULL, 3899271145504796ULL,
+    1261890974076660ULL, 525357673886694ULL, 182135997828583ULL, 4292760618810332ULL,
+    3404186545541683ULL, 312297386688768ULL, 204377466824608ULL, 230900767857952ULL,
+    3871485172339693ULL, 779449329662955ULL, 978655822464694ULL, 2278252139594027ULL,
+    104641527040382ULL, 3528840153625765ULL, 4484699080275273ULL, 1463971951102316ULL,
+    4013910812844749ULL, 228915589433620ULL, 1209641433482461ULL, 4043178788774759ULL,
+    3008668238856634ULL, 1448425089071412ULL, 26269719725037ULL, 3330785027545223ULL,
+    852657975349259ULL, 227245054466105ULL, 1534632353984777ULL, 207715098574660ULL,
+    3209837527352280ULL, 4051688046309066ULL, 3839009590725955ULL, 1321506437398842ULL,
+    68340219159928ULL, 1806950276956275ULL, 3923908055275295ULL, 743963253393575ULL,
+    42162407478783ULL, 261334584474610ULL, 3728224928885214ULL, 4004701081842869ULL,
+    709043201644674ULL, 4267294249150171ULL, 255540582975025ULL, 875490593722211ULL,
+    796393708218375ULL, 14774425627956ULL, 1500040516752097ULL, 141076627721678ULL,
+    2634539368480628ULL, 1106488853550103ULL, 2346231921151930ULL, 897108283954283ULL,
+    64616679559843ULL, 400244949840943ULL, 1731263826831733ULL, 1649996579904651ULL,
+    3643693449640761ULL, 172543068638991ULL, 329537981097182ULL, 2029799860802869ULL,
+    4377737515208862ULL, 29103311051334ULL, 265583594111499ULL, 3798074876561255ULL,
+    184749333259352ULL, 3117395073661801ULL, 3695784565008833ULL, 64282709896721ULL,
+    1618968913246422ULL, 3185235128095257ULL, 3288745068118692ULL, 1963818603508782ULL,
+    281054350739495ULL, 1658639050810346ULL, 3061097601679552ULL, 3023781433263746ULL,
+    2770283391242475ULL, 144508864751908ULL, 173576288079856ULL, 46114579547054ULL,
+    1679480127300211ULL, 1683062051644007ULL, 117183826129323ULL, 1894068608117440ULL,
+    3846899838975733ULL, 4289279019496192ULL, 176995887914031ULL, 78074942938713ULL,
+    454207263265292ULL, 972683614054061ULL, 808474205144361ULL, 942703935951735ULL,
+    134460241077887ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_g_pow2_64_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)4496295042185355U,
-    (uint64_t)3125448202219451U, (uint64_t)1239608518490046U, (uint64_t)2687445637493112U,
-    (uint64_t)77979604880139U, (uint64_t)3360310474215011U, (uint64_t)1216410458165163U,
-    (uint64_t)177901593587973U, (uint64_t)3209978938104985U, (uint64_t)118285133003718U,
-    (uint64_t)434519962075150U, (uint64_t)1114612377498854U, (uint64_t)3488596944003813U,
-    (uint64_t)450716531072892U, (uint64_t)66044973203836U, (uint64_t)2822827191156652U,
-    (uint64_t)2417714248626059U, (uint64_t)2173117567943U, (uint64_t)961513119252459U,
-    (uint64_t)233852556538333U, (uint64_t)3014783730323962U, (uint64_t)2955192634004574U,
-    (uint64_t)580546524951282U, (uint64_t)2982973948711252U, (uint64_t)226295722018730U,
-    (uint64_t)26457116218543U, (uint64_t)3401523493637663U, (uint64_t)2597746825024790U,
-    (uint64_t)1789211180483113U, (uint64_t)155862365823427U, (uint64_t)4056806876632134U,
-    (uint64_t)1742291745730568U, (uint64_t)3527759000626890U, (uint64_t)3740578471192596U,
-    (uint64_t)177295097700537U, (uint64_t)1533961415657770U, (uint64_t)4305228982382487U,
-    (uint64_t)4069090871282711U, (uint64_t)4090877481646667U, (uint64_t)220939617041498U,
-    (uint64_t)2057548127959588U, (uint64_t)45185623103252U, (uint64_t)2871963270423449U,
-    (uint64_t)3312974792248749U, (uint64_t)8710601879528U, (uint64_t)570612225194540U,
-    (uint64_t)2045632925323972U, (uint64_t)1263913878297555U, (uint64_t)1294592284757719U,
-    (uint64_t)238067747295054U, (uint64_t)1576659948829386U, (uint64_t)2315159636629917U,
-    (uint64_t)3624867787891655U, (uint64_t)647628266663887U, (uint64_t)75788399640253U,
-    (uint64_t)710811707847797U, (uint64_t)130020650130128U, (uint64_t)1975045425972589U,
-    (uint64_t)136351545314094U, (uint64_t)229292031212337U, (uint64_t)1061471455264148U,
-    (uint64_t)3281312694184822U, (uint64_t)1692442293921797U, (uint64_t)4171008525509513U,
-    (uint64_t)275424696197549U, (uint64_t)1170296303921965U, (uint64_t)4154092952807735U,
-    (uint64_t)4371262070870741U, (uint64_t)835769811036496U, (uint64_t)275812646528189U,
-    (uint64_t)4006745785521764U, (uint64_t)1965172239781114U, (uint64_t)4121055644916429U,
-    (uint64_t)3578995380229569U, (uint64_t)169798870760022U, (uint64_t)1834234783016431U,
-    (uint64_t)3186919121688538U, (uint64_t)1894269993170652U, (uint64_t)868603832348691U,
-    (uint64_t)110978471368876U, (uint64_t)1659296605881532U, (uint64_t)3257830829309297U,
-    (uint64_t)3381509832701119U, (uint64_t)4016163121121296U, (uint64_t)265240263496294U,
-    (uint64_t)4411285343933251U, (uint64_t)728746770806400U, (uint64_t)1767819098558739U,
-    (uint64_t)3002081480892841U, (uint64_t)96312133241935U, (uint64_t)468184501392107U,
-    (uint64_t)2061529496271208U, (uint64_t)801565111628867U, (uint64_t)3380678576799273U,
-    (uint64_t)121814978170941U, (uint64_t)3340363319165433U, (uint64_t)2764604325746928U,
-    (uint64_t)4475755976431968U, (uint64_t)3678073419927081U, (uint64_t)237001357924061U,
-    (uint64_t)4110487014553450U, (uint64_t)442517757833404U, (uint64_t)3976758767423859U,
-    (uint64_t)2559863799262476U, (uint64_t)178144664279213U, (uint64_t)2488702171798051U,
-    (uint64_t)4292079598620208U, (uint64_t)1642918280217329U, (uint64_t)3694920319798108U,
-    (uint64_t)111735528281657U, (uint64_t)2904433967156033U, (uint64_t)4391518032143166U,
-    (uint64_t)3018885875516259U, (uint64_t)3730342681447122U, (uint64_t)10320273322750U,
-    (uint64_t)555845881555519U, (uint64_t)58355404017985U, (uint64_t)379009359053696U,
-    (uint64_t)450317203955503U, (uint64_t)271063299686173U, (uint64_t)910340241794202U,
-    (uint64_t)4145234574853890U, (uint64_t)2059755654702755U, (uint64_t)626530377112246U,
-    (uint64_t)188918989156857U, (uint64_t)3316657461542117U, (uint64_t)778033563170765U,
-    (uint64_t)3568562306532187U, (uint64_t)2888619469733481U, (uint64_t)4364919962337U,
-    (uint64_t)4095057288587059U, (uint64_t)2275461355379988U, (uint64_t)1507422995910897U,
-    (uint64_t)3737691697116252U, (uint64_t)28779913258578U, (uint64_t)131453301647952U,
-    (uint64_t)3613515597508469U, (uint64_t)2389606941441321U, (uint64_t)2135459302594806U,
-    (uint64_t)105517262484263U, (uint64_t)2973432939331401U, (uint64_t)3447096622477885U,
-    (uint64_t)684654106536844U, (uint64_t)2815198316729695U, (uint64_t)280303067216071U,
-    (uint64_t)1841014812927024U, (uint64_t)1181026273060917U, (uint64_t)4092989148457730U,
-    (uint64_t)1381045116206278U, (uint64_t)112475725893965U, (uint64_t)2309144740156686U,
-    (uint64_t)1558825847609352U, (uint64_t)2008068002046292U, (uint64_t)3153511625856423U,
-    (uint64_t)38469701427673U, (uint64_t)4240572315518056U, (uint64_t)2295170987320580U,
-    (uint64_t)187734093837094U, (uint64_t)301041528077172U, (uint64_t)234553141005715U,
-    (uint64_t)4170513699279606U, (uint64_t)1600132848196146U, (uint64_t)3149113064155689U,
-    (uint64_t)2733255352600949U, (uint64_t)144915931419495U, (uint64_t)1221012073888926U,
-    (uint64_t)4395668111081710U, (uint64_t)2464799161496070U, (uint64_t)3664256125241313U,
-    (uint64_t)239705368981290U, (uint64_t)1415181408539490U, (uint64_t)2551836620449074U,
-    (uint64_t)3003106895689578U, (uint64_t)968947218886924U, (uint64_t)270781532362673U,
-    (uint64_t)2905980714350372U, (uint64_t)3246927349288975U, (uint64_t)2653377642686974U,
-    (uint64_t)1577457093418263U, (uint64_t)279488238785848U, (uint64_t)568335962564552U,
-    (uint64_t)4251365041645758U, (uint64_t)1257832559776007U, (uint64_t)2424022444243863U,
-    (uint64_t)261166122046343U, (uint64_t)4399874608082116U, (uint64_t)640509987891568U,
-    (uint64_t)3119706885332220U, (uint64_t)1990185416694007U, (uint64_t)119390098529341U,
-    (uint64_t)220106534694050U, (uint64_t)937225880034895U, (uint64_t)656288151358882U,
-    (uint64_t)1766967254772100U, (uint64_t)197900790969750U, (uint64_t)2992539221608875U,
-    (uint64_t)3960297171111858U, (uint64_t)3499202002925081U, (uint64_t)1103060980924705U,
-    (uint64_t)13670895919578U, (uint64_t)430132744187721U, (uint64_t)1206771838050953U,
-    (uint64_t)2474749300167198U, (uint64_t)296299539510780U, (uint64_t)61565517686436U,
-    (uint64_t)752778559080573U, (uint64_t)3049015829565410U, (uint64_t)3538647632527371U,
-    (uint64_t)1640473028662032U, (uint64_t)182488721849306U, (uint64_t)1234378482161516U,
-    (uint64_t)3736205988606381U, (uint64_t)2814216844344487U, (uint64_t)3877249891529557U,
-    (uint64_t)51681412928433U, (uint64_t)4275336620301239U, (uint64_t)3084074032750651U,
-    (uint64_t)42732308350456U, (uint64_t)3648603591552229U, (uint64_t)142450621701603U,
-    (uint64_t)4020045475009854U, (uint64_t)1050293952073054U, (uint64_t)1974773673079851U,
-    (uint64_t)1815515638724020U, (uint64_t)104845375825434U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    4496295042185355ULL, 3125448202219451ULL, 1239608518490046ULL, 2687445637493112ULL,
+    77979604880139ULL, 3360310474215011ULL, 1216410458165163ULL, 177901593587973ULL,
+    3209978938104985ULL, 118285133003718ULL, 434519962075150ULL, 1114612377498854ULL,
+    3488596944003813ULL, 450716531072892ULL, 66044973203836ULL, 2822827191156652ULL,
+    2417714248626059ULL, 2173117567943ULL, 961513119252459ULL, 233852556538333ULL,
+    3014783730323962ULL, 2955192634004574ULL, 580546524951282ULL, 2982973948711252ULL,
+    226295722018730ULL, 26457116218543ULL, 3401523493637663ULL, 2597746825024790ULL,
+    1789211180483113ULL, 155862365823427ULL, 4056806876632134ULL, 1742291745730568ULL,
+    3527759000626890ULL, 3740578471192596ULL, 177295097700537ULL, 1533961415657770ULL,
+    4305228982382487ULL, 4069090871282711ULL, 4090877481646667ULL, 220939617041498ULL,
+    2057548127959588ULL, 45185623103252ULL, 2871963270423449ULL, 3312974792248749ULL,
+    8710601879528ULL, 570612225194540ULL, 2045632925323972ULL, 1263913878297555ULL,
+    1294592284757719ULL, 238067747295054ULL, 1576659948829386ULL, 2315159636629917ULL,
+    3624867787891655ULL, 647628266663887ULL, 75788399640253ULL, 710811707847797ULL,
+    130020650130128ULL, 1975045425972589ULL, 136351545314094ULL, 229292031212337ULL,
+    1061471455264148ULL, 3281312694184822ULL, 1692442293921797ULL, 4171008525509513ULL,
+    275424696197549ULL, 1170296303921965ULL, 4154092952807735ULL, 4371262070870741ULL,
+    835769811036496ULL, 275812646528189ULL, 4006745785521764ULL, 1965172239781114ULL,
+    4121055644916429ULL, 3578995380229569ULL, 169798870760022ULL, 1834234783016431ULL,
+    3186919121688538ULL, 1894269993170652ULL, 868603832348691ULL, 110978471368876ULL,
+    1659296605881532ULL, 3257830829309297ULL, 3381509832701119ULL, 4016163121121296ULL,
+    265240263496294ULL, 4411285343933251ULL, 728746770806400ULL, 1767819098558739ULL,
+    3002081480892841ULL, 96312133241935ULL, 468184501392107ULL, 2061529496271208ULL,
+    801565111628867ULL, 3380678576799273ULL, 121814978170941ULL, 3340363319165433ULL,
+    2764604325746928ULL, 4475755976431968ULL, 3678073419927081ULL, 237001357924061ULL,
+    4110487014553450ULL, 442517757833404ULL, 3976758767423859ULL, 2559863799262476ULL,
+    178144664279213ULL, 2488702171798051ULL, 4292079598620208ULL, 1642918280217329ULL,
+    3694920319798108ULL, 111735528281657ULL, 2904433967156033ULL, 4391518032143166ULL,
+    3018885875516259ULL, 3730342681447122ULL, 10320273322750ULL, 555845881555519ULL,
+    58355404017985ULL, 379009359053696ULL, 450317203955503ULL, 271063299686173ULL,
+    910340241794202ULL, 4145234574853890ULL, 2059755654702755ULL, 626530377112246ULL,
+    188918989156857ULL, 3316657461542117ULL, 778033563170765ULL, 3568562306532187ULL,
+    2888619469733481ULL, 4364919962337ULL, 4095057288587059ULL, 2275461355379988ULL,
+    1507422995910897ULL, 3737691697116252ULL, 28779913258578ULL, 131453301647952ULL,
+    3613515597508469ULL, 2389606941441321ULL, 2135459302594806ULL, 105517262484263ULL,
+    2973432939331401ULL, 3447096622477885ULL, 684654106536844ULL, 2815198316729695ULL,
+    280303067216071ULL, 1841014812927024ULL, 1181026273060917ULL, 4092989148457730ULL,
+    1381045116206278ULL, 112475725893965ULL, 2309144740156686ULL, 1558825847609352ULL,
+    2008068002046292ULL, 3153511625856423ULL, 38469701427673ULL, 4240572315518056ULL,
+    2295170987320580ULL, 187734093837094ULL, 301041528077172ULL, 234553141005715ULL,
+    4170513699279606ULL, 1600132848196146ULL, 3149113064155689ULL, 2733255352600949ULL,
+    144915931419495ULL, 1221012073888926ULL, 4395668111081710ULL, 2464799161496070ULL,
+    3664256125241313ULL, 239705368981290ULL, 1415181408539490ULL, 2551836620449074ULL,
+    3003106895689578ULL, 968947218886924ULL, 270781532362673ULL, 2905980714350372ULL,
+    3246927349288975ULL, 2653377642686974ULL, 1577457093418263ULL, 279488238785848ULL,
+    568335962564552ULL, 4251365041645758ULL, 1257832559776007ULL, 2424022444243863ULL,
+    261166122046343ULL, 4399874608082116ULL, 640509987891568ULL, 3119706885332220ULL,
+    1990185416694007ULL, 119390098529341ULL, 220106534694050ULL, 937225880034895ULL,
+    656288151358882ULL, 1766967254772100ULL, 197900790969750ULL, 2992539221608875ULL,
+    3960297171111858ULL, 3499202002925081ULL, 1103060980924705ULL, 13670895919578ULL,
+    430132744187721ULL, 1206771838050953ULL, 2474749300167198ULL, 296299539510780ULL,
+    61565517686436ULL, 752778559080573ULL, 3049015829565410ULL, 3538647632527371ULL,
+    1640473028662032ULL, 182488721849306ULL, 1234378482161516ULL, 3736205988606381ULL,
+    2814216844344487ULL, 3877249891529557ULL, 51681412928433ULL, 4275336620301239ULL,
+    3084074032750651ULL, 42732308350456ULL, 3648603591552229ULL, 142450621701603ULL,
+    4020045475009854ULL, 1050293952073054ULL, 1974773673079851ULL, 1815515638724020ULL,
+    104845375825434ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_g_pow2_128_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1277614565900951U,
-    (uint64_t)378671684419493U, (uint64_t)3176260448102880U, (uint64_t)1575691435565077U,
-    (uint64_t)167304528382180U, (uint64_t)2600787765776588U, (uint64_t)7497946149293U,
-    (uint64_t)2184272641272202U, (uint64_t)2200235265236628U, (uint64_t)265969268774814U,
-    (uint64_t)1913228635640715U, (uint64_t)2831959046949342U, (uint64_t)888030405442963U,
-    (uint64_t)1817092932985033U, (uint64_t)101515844997121U, (uint64_t)3309468394859588U,
-    (uint64_t)3965334773689948U, (uint64_t)1945272965790738U, (uint64_t)4450939211427964U,
-    (uint64_t)211349698782702U, (uint64_t)2085160302160079U, (uint64_t)212812506072603U,
-    (uint64_t)3646122434511764U, (uint64_t)1711405092320514U, (uint64_t)95160920508464U,
-    (uint64_t)1677683368518073U, (uint64_t)4384656939250953U, (uint64_t)3548591046529893U,
-    (uint64_t)1683233536091384U, (uint64_t)105919586159941U, (uint64_t)1941416002726455U,
-    (uint64_t)246264372248216U, (uint64_t)3063044110922228U, (uint64_t)3772292170415825U,
-    (uint64_t)222933374989815U, (uint64_t)2417211163452935U, (uint64_t)2018230365573200U,
-    (uint64_t)1985974538911047U, (uint64_t)1387197705332739U, (uint64_t)186400825584956U,
-    (uint64_t)2469330487750329U, (uint64_t)1291983813301638U, (uint64_t)333416733706302U,
-    (uint64_t)3413315564261070U, (uint64_t)189444777569683U, (uint64_t)1062005622360420U,
-    (uint64_t)1800197715938740U, (uint64_t)3693110992551647U, (uint64_t)626990328941945U,
-    (uint64_t)40998857100520U, (uint64_t)3921983552805085U, (uint64_t)1016632437340656U,
-    (uint64_t)4016615929950878U, (uint64_t)2682554586771281U, (uint64_t)7043555162389U,
-    (uint64_t)3333819830676567U, (uint64_t)4120091964944036U, (uint64_t)1960788263484015U,
-    (uint64_t)1642145656273304U, (uint64_t)252814075789128U, (uint64_t)3085777342821357U,
-    (uint64_t)4166637997604052U, (uint64_t)1339401689756469U, (uint64_t)845938529607551U,
-    (uint64_t)223351828189283U, (uint64_t)1148648705186890U, (uint64_t)1230525014760605U,
-    (uint64_t)1869739475126720U, (uint64_t)4193966261205530U, (uint64_t)175684010336013U,
-    (uint64_t)4476719358931508U, (uint64_t)4209547487457638U, (uint64_t)2197536411673724U,
-    (uint64_t)3010838433412303U, (uint64_t)169318997251483U, (uint64_t)49493868302162U,
-    (uint64_t)3594601099078584U, (uint64_t)3662420905445942U, (uint64_t)3606544932233685U,
-    (uint64_t)270643652662165U, (uint64_t)180681786228544U, (uint64_t)2095882682308564U,
-    (uint64_t)813484483841391U, (uint64_t)1622665392824698U, (uint64_t)113821770225137U,
-    (uint64_t)3075432444115417U, (uint64_t)716502989978722U, (uint64_t)2304779892217245U,
-    (uint64_t)1760144151770127U, (uint64_t)235719156963938U, (uint64_t)3180013070471143U,
-    (uint64_t)1331027634540579U, (uint64_t)552273022992392U, (uint64_t)2858693077461887U,
-    (uint64_t)197914407731510U, (uint64_t)187252310910959U, (uint64_t)4160637171377125U,
-    (uint64_t)3225059526713298U, (uint64_t)2574558217383978U, (uint64_t)249695600622489U,
-    (uint64_t)364988742814327U, (uint64_t)4245298536326258U, (uint64_t)1812464706589342U,
-    (uint64_t)2734857123772998U, (uint64_t)120105577124628U, (uint64_t)160179251271109U,
-    (uint64_t)3604555733307834U, (uint64_t)150380003195715U, (uint64_t)1574304909935121U,
-    (uint64_t)142190285600761U, (uint64_t)1835385847725651U, (uint64_t)3168087139615901U,
-    (uint64_t)3201434861713736U, (uint64_t)741757984537760U, (uint64_t)163585009419543U,
-    (uint64_t)3837997981109783U, (uint64_t)3771946407870997U, (uint64_t)2867641360295452U,
-    (uint64_t)3097548691501578U, (uint64_t)124624912142104U, (uint64_t)2729896088769328U,
-    (uint64_t)1087786827035225U, (uint64_t)3934000813818614U, (uint64_t)1176792318645055U,
-    (uint64_t)125311882169270U, (uint64_t)3530709439299502U, (uint64_t)1561477829834527U,
-    (uint64_t)3927894570196761U, (uint64_t)3957765307669212U, (uint64_t)105720519513730U,
-    (uint64_t)3758969845816997U, (uint64_t)2738320452287300U, (uint64_t)2380753632109507U,
-    (uint64_t)2762090901149075U, (uint64_t)123455059136515U, (uint64_t)4222807813169807U,
-    (uint64_t)118064783651432U, (uint64_t)2877694712254934U, (uint64_t)3535027426396448U,
-    (uint64_t)100175663703417U, (uint64_t)3287921121213155U, (uint64_t)4497246481824206U,
-    (uint64_t)1960809949007025U, (uint64_t)3236854264159102U, (uint64_t)35028112623717U,
-    (uint64_t)338838627913273U, (uint64_t)2827531947914645U, (uint64_t)4231826783810670U,
-    (uint64_t)1082490106100389U, (uint64_t)13267544387448U, (uint64_t)4249975884259105U,
-    (uint64_t)2844862161652484U, (uint64_t)262742197948971U, (uint64_t)3525653802457116U,
-    (uint64_t)269963889261701U, (uint64_t)3690062482117102U, (uint64_t)675413453822147U,
-    (uint64_t)2170937868437574U, (uint64_t)2367632187022010U, (uint64_t)214032802409445U,
-    (uint64_t)2054007379612477U, (uint64_t)3558050826739009U, (uint64_t)266827184752634U,
-    (uint64_t)1946520293291195U, (uint64_t)238087872386556U, (uint64_t)490056555385700U,
-    (uint64_t)794405769357386U, (uint64_t)3886901294859702U, (uint64_t)3120414548626348U,
-    (uint64_t)84316625221136U, (uint64_t)223073962531835U, (uint64_t)4280846460577631U,
-    (uint64_t)344296282849308U, (uint64_t)3522116652699457U, (uint64_t)171817232053075U,
-    (uint64_t)3296636283062273U, (uint64_t)3587303364425579U, (uint64_t)1033485783633331U,
-    (uint64_t)3686984130812906U, (uint64_t)268290803650477U, (uint64_t)2803988215834467U,
-    (uint64_t)3821246410529720U, (uint64_t)1077722388925870U, (uint64_t)4187137036866164U,
-    (uint64_t)104696540795905U, (uint64_t)998770003854764U, (uint64_t)3960768137535019U,
-    (uint64_t)4293792474919135U, (uint64_t)3251297981727034U, (uint64_t)192479028790101U,
-    (uint64_t)1175880869349935U, (uint64_t)3506949259311937U, (uint64_t)2161711516160714U,
-    (uint64_t)2506820922270187U, (uint64_t)131002200661047U, (uint64_t)3532399477339994U,
-    (uint64_t)2515815721228719U, (uint64_t)4274974119021502U, (uint64_t)265752394510924U,
-    (uint64_t)163144272153395U, (uint64_t)2824260010502991U, (uint64_t)517077012665142U,
-    (uint64_t)602987073882924U, (uint64_t)2939630061751780U, (uint64_t)59211609557440U,
-    (uint64_t)963423614549333U, (uint64_t)495476232754434U, (uint64_t)94274496109103U,
-    (uint64_t)2245136222990187U, (uint64_t)185414764872288U, (uint64_t)2266067668609289U,
-    (uint64_t)3873978896235927U, (uint64_t)4428283513152105U, (uint64_t)3881481480259312U,
-    (uint64_t)207746202010862U, (uint64_t)1609437858011364U, (uint64_t)477585758421515U,
-    (uint64_t)3850430788664649U, (uint64_t)2682299074459173U, (uint64_t)149439089751274U,
-    (uint64_t)3665760243877698U, (uint64_t)1356661512658931U, (uint64_t)1675903262368322U,
-    (uint64_t)3355649228050892U, (uint64_t)99772108898412U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    1277614565900951ULL, 378671684419493ULL, 3176260448102880ULL, 1575691435565077ULL,
+    167304528382180ULL, 2600787765776588ULL, 7497946149293ULL, 2184272641272202ULL,
+    2200235265236628ULL, 265969268774814ULL, 1913228635640715ULL, 2831959046949342ULL,
+    888030405442963ULL, 1817092932985033ULL, 101515844997121ULL, 3309468394859588ULL,
+    3965334773689948ULL, 1945272965790738ULL, 4450939211427964ULL, 211349698782702ULL,
+    2085160302160079ULL, 212812506072603ULL, 3646122434511764ULL, 1711405092320514ULL,
+    95160920508464ULL, 1677683368518073ULL, 4384656939250953ULL, 3548591046529893ULL,
+    1683233536091384ULL, 105919586159941ULL, 1941416002726455ULL, 246264372248216ULL,
+    3063044110922228ULL, 3772292170415825ULL, 222933374989815ULL, 2417211163452935ULL,
+    2018230365573200ULL, 1985974538911047ULL, 1387197705332739ULL, 186400825584956ULL,
+    2469330487750329ULL, 1291983813301638ULL, 333416733706302ULL, 3413315564261070ULL,
+    189444777569683ULL, 1062005622360420ULL, 1800197715938740ULL, 3693110992551647ULL,
+    626990328941945ULL, 40998857100520ULL, 3921983552805085ULL, 1016632437340656ULL,
+    4016615929950878ULL, 2682554586771281ULL, 7043555162389ULL, 3333819830676567ULL,
+    4120091964944036ULL, 1960788263484015ULL, 1642145656273304ULL, 252814075789128ULL,
+    3085777342821357ULL, 4166637997604052ULL, 1339401689756469ULL, 845938529607551ULL,
+    223351828189283ULL, 1148648705186890ULL, 1230525014760605ULL, 1869739475126720ULL,
+    4193966261205530ULL, 175684010336013ULL, 4476719358931508ULL, 4209547487457638ULL,
+    2197536411673724ULL, 3010838433412303ULL, 169318997251483ULL, 49493868302162ULL,
+    3594601099078584ULL, 3662420905445942ULL, 3606544932233685ULL, 270643652662165ULL,
+    180681786228544ULL, 2095882682308564ULL, 813484483841391ULL, 1622665392824698ULL,
+    113821770225137ULL, 3075432444115417ULL, 716502989978722ULL, 2304779892217245ULL,
+    1760144151770127ULL, 235719156963938ULL, 3180013070471143ULL, 1331027634540579ULL,
+    552273022992392ULL, 2858693077461887ULL, 197914407731510ULL, 187252310910959ULL,
+    4160637171377125ULL, 3225059526713298ULL, 2574558217383978ULL, 249695600622489ULL,
+    364988742814327ULL, 4245298536326258ULL, 1812464706589342ULL, 2734857123772998ULL,
+    120105577124628ULL, 160179251271109ULL, 3604555733307834ULL, 150380003195715ULL,
+    1574304909935121ULL, 142190285600761ULL, 1835385847725651ULL, 3168087139615901ULL,
+    3201434861713736ULL, 741757984537760ULL, 163585009419543ULL, 3837997981109783ULL,
+    3771946407870997ULL, 2867641360295452ULL, 3097548691501578ULL, 124624912142104ULL,
+    2729896088769328ULL, 1087786827035225ULL, 3934000813818614ULL, 1176792318645055ULL,
+    125311882169270ULL, 3530709439299502ULL, 1561477829834527ULL, 3927894570196761ULL,
+    3957765307669212ULL, 105720519513730ULL, 3758969845816997ULL, 2738320452287300ULL,
+    2380753632109507ULL, 2762090901149075ULL, 123455059136515ULL, 4222807813169807ULL,
+    118064783651432ULL, 2877694712254934ULL, 3535027426396448ULL, 100175663703417ULL,
+    3287921121213155ULL, 4497246481824206ULL, 1960809949007025ULL, 3236854264159102ULL,
+    35028112623717ULL, 338838627913273ULL, 2827531947914645ULL, 4231826783810670ULL,
+    1082490106100389ULL, 13267544387448ULL, 4249975884259105ULL, 2844862161652484ULL,
+    262742197948971ULL, 3525653802457116ULL, 269963889261701ULL, 3690062482117102ULL,
+    675413453822147ULL, 2170937868437574ULL, 2367632187022010ULL, 214032802409445ULL,
+    2054007379612477ULL, 3558050826739009ULL, 266827184752634ULL, 1946520293291195ULL,
+    238087872386556ULL, 490056555385700ULL, 794405769357386ULL, 3886901294859702ULL,
+    3120414548626348ULL, 84316625221136ULL, 223073962531835ULL, 4280846460577631ULL,
+    344296282849308ULL, 3522116652699457ULL, 171817232053075ULL, 3296636283062273ULL,
+    3587303364425579ULL, 1033485783633331ULL, 3686984130812906ULL, 268290803650477ULL,
+    2803988215834467ULL, 3821246410529720ULL, 1077722388925870ULL, 4187137036866164ULL,
+    104696540795905ULL, 998770003854764ULL, 3960768137535019ULL, 4293792474919135ULL,
+    3251297981727034ULL, 192479028790101ULL, 1175880869349935ULL, 3506949259311937ULL,
+    2161711516160714ULL, 2506820922270187ULL, 131002200661047ULL, 3532399477339994ULL,
+    2515815721228719ULL, 4274974119021502ULL, 265752394510924ULL, 163144272153395ULL,
+    2824260010502991ULL, 517077012665142ULL, 602987073882924ULL, 2939630061751780ULL,
+    59211609557440ULL, 963423614549333ULL, 495476232754434ULL, 94274496109103ULL,
+    2245136222990187ULL, 185414764872288ULL, 2266067668609289ULL, 3873978896235927ULL,
+    4428283513152105ULL, 3881481480259312ULL, 207746202010862ULL, 1609437858011364ULL,
+    477585758421515ULL, 3850430788664649ULL, 2682299074459173ULL, 149439089751274ULL,
+    3665760243877698ULL, 1356661512658931ULL, 1675903262368322ULL, 3355649228050892ULL,
+    99772108898412ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_g_pow2_192_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)34056422761564U,
-    (uint64_t)3315864838337811U, (uint64_t)3797032336888745U, (uint64_t)2580641850480806U,
-    (uint64_t)208048944042500U, (uint64_t)1233795288689421U, (uint64_t)1048795233382631U,
-    (uint64_t)646545158071530U, (uint64_t)1816025742137285U, (uint64_t)12245672982162U,
-    (uint64_t)2119364213800870U, (uint64_t)2034960311715107U, (uint64_t)3172697815804487U,
-    (uint64_t)4185144850224160U, (uint64_t)2792055915674U, (uint64_t)795534452139321U,
-    (uint64_t)3647836177838185U, (uint64_t)2681403398797991U, (uint64_t)3149264270306207U,
-    (uint64_t)278704080615511U, (uint64_t)2752552368344718U, (uint64_t)1363840972378818U,
-    (uint64_t)1877521512083293U, (uint64_t)1862111388059470U, (uint64_t)36200324115014U,
-    (uint64_t)4183622899327217U, (uint64_t)747381675363076U, (uint64_t)2772916395314624U,
-    (uint64_t)833767013119965U, (uint64_t)246274452928088U, (uint64_t)1526238021297781U,
-    (uint64_t)3327534966022747U, (uint64_t)1169012581910517U, (uint64_t)4430894603030025U,
-    (uint64_t)149242742442115U, (uint64_t)1002569704307172U, (uint64_t)2763252093432365U,
-    (uint64_t)3037748497732938U, (uint64_t)2329811173939457U, (uint64_t)270769113180752U,
-    (uint64_t)4344092461623432U, (uint64_t)892200524589382U, (uint64_t)2511418516713970U,
-    (uint64_t)103575031265398U, (uint64_t)183736033430252U, (uint64_t)583003071257308U,
-    (uint64_t)3357167344738425U, (uint64_t)4038099763242651U, (uint64_t)1776250620957255U,
-    (uint64_t)51334115864192U, (uint64_t)2616405698969611U, (uint64_t)1196364755910565U,
-    (uint64_t)3135228056210500U, (uint64_t)533729417611761U, (uint64_t)86564351229326U,
-    (uint64_t)98936129527281U, (uint64_t)4425305036630677U, (uint64_t)2980296390253408U,
-    (uint64_t)2487091677325739U, (uint64_t)10501977234280U, (uint64_t)1805646499831077U,
-    (uint64_t)3120615962395477U, (uint64_t)3634629685307533U, (uint64_t)3009632755291436U,
-    (uint64_t)16794051906523U, (uint64_t)2465481597883214U, (uint64_t)211492787490403U,
-    (uint64_t)1120942867046103U, (uint64_t)486438308572108U, (uint64_t)76058986271771U,
-    (uint64_t)2435216584587357U, (uint64_t)3076359381968283U, (uint64_t)1071594491489655U,
-    (uint64_t)3148707450339154U, (uint64_t)249332205737851U, (uint64_t)4171051176626809U,
-    (uint64_t)3165176227956388U, (uint64_t)2400901591835233U, (uint64_t)1435783621333022U,
-    (uint64_t)20312753440321U, (uint64_t)1767293887448005U, (uint64_t)685150647587522U,
-    (uint64_t)2957187934449906U, (uint64_t)382661319140439U, (uint64_t)177583591139601U,
-    (uint64_t)2083572648630743U, (uint64_t)1083410277889419U, (uint64_t)4267902097868310U,
-    (uint64_t)679989918385081U, (uint64_t)123155311554032U, (uint64_t)2830267662472020U,
-    (uint64_t)4476040509735924U, (uint64_t)526697201585144U, (uint64_t)3465306430573135U,
-    (uint64_t)2296616218591U, (uint64_t)1270626872734279U, (uint64_t)1049740198790549U,
-    (uint64_t)4197567214843444U, (uint64_t)1962225231320591U, (uint64_t)186125026796856U,
-    (uint64_t)737027567341142U, (uint64_t)4364616098174U, (uint64_t)3618884818756660U,
-    (uint64_t)1236837563717668U, (uint64_t)162873772439548U, (uint64_t)3081542470065122U,
-    (uint64_t)910331750163991U, (uint64_t)2110498143869827U, (uint64_t)3208473121852657U,
-    (uint64_t)94687786224509U, (uint64_t)4113309027567819U, (uint64_t)4272179438357536U,
-    (uint64_t)1857418654076140U, (uint64_t)1672678841741004U, (uint64_t)94482160248411U,
-    (uint64_t)1928652436799020U, (uint64_t)1750866462381515U, (uint64_t)4048060485672270U,
-    (uint64_t)4006680581258587U, (uint64_t)14850434761312U, (uint64_t)2828734997081648U,
-    (uint64_t)1975589525873972U, (uint64_t)3724347738416009U, (uint64_t)597163266689736U,
-    (uint64_t)14568362978551U, (uint64_t)2203865455839744U, (uint64_t)2237034958890595U,
-    (uint64_t)1863572986731818U, (uint64_t)2329774560279041U, (uint64_t)245105447642201U,
-    (uint64_t)2179697447864822U, (uint64_t)1769609498189882U, (uint64_t)1916950746430931U,
-    (uint64_t)847019613787312U, (uint64_t)163210606565100U, (uint64_t)3658248417400062U,
-    (uint64_t)717138296045881U, (uint64_t)42531212306121U, (uint64_t)1040915917097532U,
-    (uint64_t)77364489101310U, (uint64_t)539253504015590U, (uint64_t)732690726289841U,
-    (uint64_t)3401622034697806U, (uint64_t)2864593278358513U, (uint64_t)142611941887017U,
-    (uint64_t)536364617506702U, (uint64_t)845071859974284U, (uint64_t)4461787417089721U,
-    (uint64_t)2633811871939723U, (uint64_t)113619731985610U, (uint64_t)2535870015489566U,
-    (uint64_t)2146224665077830U, (uint64_t)2593725534662047U, (uint64_t)1332349537449710U,
-    (uint64_t)153375287068096U, (uint64_t)3689977177165276U, (uint64_t)3631865615314120U,
-    (uint64_t)184644878348929U, (uint64_t)2220481726602813U, (uint64_t)204002551273091U,
-    (uint64_t)3022560051766785U, (uint64_t)3125940458001213U, (uint64_t)4258299086906325U,
-    (uint64_t)1072471915162030U, (uint64_t)2797562724530U, (uint64_t)3974298156223059U,
-    (uint64_t)1624778551002554U, (uint64_t)3490703864485971U, (uint64_t)2533877484212458U,
-    (uint64_t)176107782538555U, (uint64_t)4275987398312137U, (uint64_t)4397120757693722U,
-    (uint64_t)3001292763847390U, (uint64_t)1556490837621310U, (uint64_t)70442953037671U,
-    (uint64_t)1558915972545974U, (uint64_t)744724505252845U, (uint64_t)2697230204313363U,
-    (uint64_t)3495671924212144U, (uint64_t)95744296878924U, (uint64_t)1508848630912047U,
-    (uint64_t)4163599342850968U, (uint64_t)1234988733935901U, (uint64_t)3789722472212706U,
-    (uint64_t)219522007052022U, (uint64_t)2106597506701262U, (uint64_t)3231115099832239U,
-    (uint64_t)1296436890593905U, (uint64_t)1016795619587656U, (uint64_t)231150565033388U,
-    (uint64_t)4205501688458754U, (uint64_t)2271569140386062U, (uint64_t)3421769599058157U,
-    (uint64_t)4118408853784554U, (uint64_t)276709341465173U, (uint64_t)2681340614854362U,
-    (uint64_t)2514413365628788U, (uint64_t)62294545067341U, (uint64_t)277610220069365U,
-    (uint64_t)252463150123799U, (uint64_t)2547353593759399U, (uint64_t)1857438147448607U,
-    (uint64_t)2964811969681256U, (uint64_t)3303706463835387U, (uint64_t)248936570980853U,
-    (uint64_t)3208982702478009U, (uint64_t)2518671051730787U, (uint64_t)727433853033835U,
-    (uint64_t)1290389308223446U, (uint64_t)220742793981035U, (uint64_t)3851225361654709U,
-    (uint64_t)2307489307934273U, (uint64_t)1151710489948266U, (uint64_t)289775285210516U,
-    (uint64_t)222685002397295U, (uint64_t)1222117478082108U, (uint64_t)2822029169395728U,
-    (uint64_t)1172146252219882U, (uint64_t)2626108105510259U, (uint64_t)209803527887167U,
-    (uint64_t)2718831919953281U, (uint64_t)4348638387588593U, (uint64_t)3761438313263183U,
-    (uint64_t)13169515318095U, (uint64_t)212893621229476U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    34056422761564ULL, 3315864838337811ULL, 3797032336888745ULL, 2580641850480806ULL,
+    208048944042500ULL, 1233795288689421ULL, 1048795233382631ULL, 646545158071530ULL,
+    1816025742137285ULL, 12245672982162ULL, 2119364213800870ULL, 2034960311715107ULL,
+    3172697815804487ULL, 4185144850224160ULL, 2792055915674ULL, 795534452139321ULL,
+    3647836177838185ULL, 2681403398797991ULL, 3149264270306207ULL, 278704080615511ULL,
+    2752552368344718ULL, 1363840972378818ULL, 1877521512083293ULL, 1862111388059470ULL,
+    36200324115014ULL, 4183622899327217ULL, 747381675363076ULL, 2772916395314624ULL,
+    833767013119965ULL, 246274452928088ULL, 1526238021297781ULL, 3327534966022747ULL,
+    1169012581910517ULL, 4430894603030025ULL, 149242742442115ULL, 1002569704307172ULL,
+    2763252093432365ULL, 3037748497732938ULL, 2329811173939457ULL, 270769113180752ULL,
+    4344092461623432ULL, 892200524589382ULL, 2511418516713970ULL, 103575031265398ULL,
+    183736033430252ULL, 583003071257308ULL, 3357167344738425ULL, 4038099763242651ULL,
+    1776250620957255ULL, 51334115864192ULL, 2616405698969611ULL, 1196364755910565ULL,
+    3135228056210500ULL, 533729417611761ULL, 86564351229326ULL, 98936129527281ULL,
+    4425305036630677ULL, 2980296390253408ULL, 2487091677325739ULL, 10501977234280ULL,
+    1805646499831077ULL, 3120615962395477ULL, 3634629685307533ULL, 3009632755291436ULL,
+    16794051906523ULL, 2465481597883214ULL, 211492787490403ULL, 1120942867046103ULL,
+    486438308572108ULL, 76058986271771ULL, 2435216584587357ULL, 3076359381968283ULL,
+    1071594491489655ULL, 3148707450339154ULL, 249332205737851ULL, 4171051176626809ULL,
+    3165176227956388ULL, 2400901591835233ULL, 1435783621333022ULL, 20312753440321ULL,
+    1767293887448005ULL, 685150647587522ULL, 2957187934449906ULL, 382661319140439ULL,
+    177583591139601ULL, 2083572648630743ULL, 1083410277889419ULL, 4267902097868310ULL,
+    679989918385081ULL, 123155311554032ULL, 2830267662472020ULL, 4476040509735924ULL,
+    526697201585144ULL, 3465306430573135ULL, 2296616218591ULL, 1270626872734279ULL,
+    1049740198790549ULL, 4197567214843444ULL, 1962225231320591ULL, 186125026796856ULL,
+    737027567341142ULL, 4364616098174ULL, 3618884818756660ULL, 1236837563717668ULL,
+    162873772439548ULL, 3081542470065122ULL, 910331750163991ULL, 2110498143869827ULL,
+    3208473121852657ULL, 94687786224509ULL, 4113309027567819ULL, 4272179438357536ULL,
+    1857418654076140ULL, 1672678841741004ULL, 94482160248411ULL, 1928652436799020ULL,
+    1750866462381515ULL, 4048060485672270ULL, 4006680581258587ULL, 14850434761312ULL,
+    2828734997081648ULL, 1975589525873972ULL, 3724347738416009ULL, 597163266689736ULL,
+    14568362978551ULL, 2203865455839744ULL, 2237034958890595ULL, 1863572986731818ULL,
+    2329774560279041ULL, 245105447642201ULL, 2179697447864822ULL, 1769609498189882ULL,
+    1916950746430931ULL, 847019613787312ULL, 163210606565100ULL, 3658248417400062ULL,
+    717138296045881ULL, 42531212306121ULL, 1040915917097532ULL, 77364489101310ULL,
+    539253504015590ULL, 732690726289841ULL, 3401622034697806ULL, 2864593278358513ULL,
+    142611941887017ULL, 536364617506702ULL, 845071859974284ULL, 4461787417089721ULL,
+    2633811871939723ULL, 113619731985610ULL, 2535870015489566ULL, 2146224665077830ULL,
+    2593725534662047ULL, 1332349537449710ULL, 153375287068096ULL, 3689977177165276ULL,
+    3631865615314120ULL, 184644878348929ULL, 2220481726602813ULL, 204002551273091ULL,
+    3022560051766785ULL, 3125940458001213ULL, 4258299086906325ULL, 1072471915162030ULL,
+    2797562724530ULL, 3974298156223059ULL, 1624778551002554ULL, 3490703864485971ULL,
+    2533877484212458ULL, 176107782538555ULL, 4275987398312137ULL, 4397120757693722ULL,
+    3001292763847390ULL, 1556490837621310ULL, 70442953037671ULL, 1558915972545974ULL,
+    744724505252845ULL, 2697230204313363ULL, 3495671924212144ULL, 95744296878924ULL,
+    1508848630912047ULL, 4163599342850968ULL, 1234988733935901ULL, 3789722472212706ULL,
+    219522007052022ULL, 2106597506701262ULL, 3231115099832239ULL, 1296436890593905ULL,
+    1016795619587656ULL, 231150565033388ULL, 4205501688458754ULL, 2271569140386062ULL,
+    3421769599058157ULL, 4118408853784554ULL, 276709341465173ULL, 2681340614854362ULL,
+    2514413365628788ULL, 62294545067341ULL, 277610220069365ULL, 252463150123799ULL,
+    2547353593759399ULL, 1857438147448607ULL, 2964811969681256ULL, 3303706463835387ULL,
+    248936570980853ULL, 3208982702478009ULL, 2518671051730787ULL, 727433853033835ULL,
+    1290389308223446ULL, 220742793981035ULL, 3851225361654709ULL, 2307489307934273ULL,
+    1151710489948266ULL, 289775285210516ULL, 222685002397295ULL, 1222117478082108ULL,
+    2822029169395728ULL, 1172146252219882ULL, 2626108105510259ULL, 209803527887167ULL,
+    2718831919953281ULL, 4348638387588593ULL, 3761438313263183ULL, 13169515318095ULL,
+    212893621229476ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_basepoint_table_w5[480U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)705178180786072U,
-    (uint64_t)3855836460717471U, (uint64_t)4089131105950716U, (uint64_t)3301581525494108U,
-    (uint64_t)133858670344668U, (uint64_t)2199641648059576U, (uint64_t)1278080618437060U,
-    (uint64_t)3959378566518708U, (uint64_t)3455034269351872U, (uint64_t)79417610544803U,
-    (uint64_t)1U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)1282049064345544U, (uint64_t)971732600440099U, (uint64_t)1014594595727339U,
-    (uint64_t)4392159187541980U, (uint64_t)268327875692285U, (uint64_t)2411661712280539U,
-    (uint64_t)1092576199280126U, (uint64_t)4328619610718051U, (uint64_t)3535440816471627U,
-    (uint64_t)95182251488556U, (uint64_t)1893725512243753U, (uint64_t)3619861457111820U,
-    (uint64_t)879374960417905U, (uint64_t)2868056058129113U, (uint64_t)273195291893682U,
-    (uint64_t)2044797305960112U, (uint64_t)2357106853933780U, (uint64_t)3563112438336058U,
-    (uint64_t)2430811541762558U, (uint64_t)106443809495428U, (uint64_t)2231357633909668U,
-    (uint64_t)3641705835951936U, (uint64_t)80642569314189U, (uint64_t)2254841882373268U,
-    (uint64_t)149848031966573U, (uint64_t)2304615661367764U, (uint64_t)2410957403736446U,
-    (uint64_t)2712754805859804U, (uint64_t)2440183877540536U, (uint64_t)99784623895865U,
-    (uint64_t)3667773127482758U, (uint64_t)1354899394473308U, (uint64_t)3636602998800808U,
-    (uint64_t)2709296679846364U, (uint64_t)7253362091963U, (uint64_t)3585950735562744U,
-    (uint64_t)935775991758415U, (uint64_t)4108078106735201U, (uint64_t)556081800336307U,
-    (uint64_t)229585977163057U, (uint64_t)4055594186679801U, (uint64_t)1767681004944933U,
-    (uint64_t)1432634922083242U, (uint64_t)534935602949197U, (uint64_t)251753159522567U,
-    (uint64_t)2846474078499321U, (uint64_t)4488649590348702U, (uint64_t)2437476916025038U,
-    (uint64_t)3040577412822874U, (uint64_t)79405234918614U, (uint64_t)3030621226551508U,
-    (uint64_t)2801117003929806U, (uint64_t)1642927515498422U, (uint64_t)2802725079726297U,
-    (uint64_t)8472780626107U, (uint64_t)866068070352655U, (uint64_t)188080768545106U,
-    (uint64_t)2152119998903058U, (uint64_t)3391239985029665U, (uint64_t)23820026013564U,
-    (uint64_t)2965064154891949U, (uint64_t)1846516097921398U, (uint64_t)4418379948133146U,
-    (uint64_t)3137755426942400U, (uint64_t)47705291301781U, (uint64_t)4278533051105665U,
-    (uint64_t)3453643211214931U, (uint64_t)3379734319145156U, (uint64_t)3762442192097039U,
-    (uint64_t)40243003528694U, (uint64_t)4063448994211201U, (uint64_t)5697015368785U,
-    (uint64_t)1006545411838613U, (uint64_t)4242291693755210U, (uint64_t)135184629190512U,
-    (uint64_t)264898689131035U, (uint64_t)611796474823597U, (uint64_t)3255382250029089U,
-    (uint64_t)3490429246984696U, (uint64_t)236558595864362U, (uint64_t)2055934691551704U,
-    (uint64_t)1487711670114502U, (uint64_t)1823930698221632U, (uint64_t)2130937287438472U,
-    (uint64_t)154610053389779U, (uint64_t)2746573287023216U, (uint64_t)2430987262221221U,
-    (uint64_t)1668741642878689U, (uint64_t)904982541243977U, (uint64_t)56087343124948U,
-    (uint64_t)393905062353536U, (uint64_t)412681877350188U, (uint64_t)3153602040979977U,
-    (uint64_t)4466820876224989U, (uint64_t)146579165617857U, (uint64_t)2628741216508991U,
-    (uint64_t)747994231529806U, (uint64_t)750506569317681U, (uint64_t)1887492790748779U,
-    (uint64_t)35259008682771U, (uint64_t)2085116434894208U, (uint64_t)543291398921711U,
-    (uint64_t)1144362007901552U, (uint64_t)679305136036846U, (uint64_t)141090902244489U,
-    (uint64_t)632480954474859U, (uint64_t)2384513102652591U, (uint64_t)2225529790159790U,
-    (uint64_t)692258664851625U, (uint64_t)198681843567699U, (uint64_t)2397092587228181U,
-    (uint64_t)145862822166614U, (uint64_t)196976540479452U, (uint64_t)3321831130141455U,
-    (uint64_t)69266673089832U, (uint64_t)4469644227342284U, (uint64_t)3899271145504796U,
-    (uint64_t)1261890974076660U, (uint64_t)525357673886694U, (uint64_t)182135997828583U,
-    (uint64_t)4292760618810332U, (uint64_t)3404186545541683U, (uint64_t)312297386688768U,
-    (uint64_t)204377466824608U, (uint64_t)230900767857952U, (uint64_t)3871485172339693U,
-    (uint64_t)779449329662955U, (uint64_t)978655822464694U, (uint64_t)2278252139594027U,
-    (uint64_t)104641527040382U, (uint64_t)3528840153625765U, (uint64_t)4484699080275273U,
-    (uint64_t)1463971951102316U, (uint64_t)4013910812844749U, (uint64_t)228915589433620U,
-    (uint64_t)1209641433482461U, (uint64_t)4043178788774759U, (uint64_t)3008668238856634U,
-    (uint64_t)1448425089071412U, (uint64_t)26269719725037U, (uint64_t)3330785027545223U,
-    (uint64_t)852657975349259U, (uint64_t)227245054466105U, (uint64_t)1534632353984777U,
-    (uint64_t)207715098574660U, (uint64_t)3209837527352280U, (uint64_t)4051688046309066U,
-    (uint64_t)3839009590725955U, (uint64_t)1321506437398842U, (uint64_t)68340219159928U,
-    (uint64_t)1806950276956275U, (uint64_t)3923908055275295U, (uint64_t)743963253393575U,
-    (uint64_t)42162407478783U, (uint64_t)261334584474610U, (uint64_t)3728224928885214U,
-    (uint64_t)4004701081842869U, (uint64_t)709043201644674U, (uint64_t)4267294249150171U,
-    (uint64_t)255540582975025U, (uint64_t)875490593722211U, (uint64_t)796393708218375U,
-    (uint64_t)14774425627956U, (uint64_t)1500040516752097U, (uint64_t)141076627721678U,
-    (uint64_t)2634539368480628U, (uint64_t)1106488853550103U, (uint64_t)2346231921151930U,
-    (uint64_t)897108283954283U, (uint64_t)64616679559843U, (uint64_t)400244949840943U,
-    (uint64_t)1731263826831733U, (uint64_t)1649996579904651U, (uint64_t)3643693449640761U,
-    (uint64_t)172543068638991U, (uint64_t)329537981097182U, (uint64_t)2029799860802869U,
-    (uint64_t)4377737515208862U, (uint64_t)29103311051334U, (uint64_t)265583594111499U,
-    (uint64_t)3798074876561255U, (uint64_t)184749333259352U, (uint64_t)3117395073661801U,
-    (uint64_t)3695784565008833U, (uint64_t)64282709896721U, (uint64_t)1618968913246422U,
-    (uint64_t)3185235128095257U, (uint64_t)3288745068118692U, (uint64_t)1963818603508782U,
-    (uint64_t)281054350739495U, (uint64_t)1658639050810346U, (uint64_t)3061097601679552U,
-    (uint64_t)3023781433263746U, (uint64_t)2770283391242475U, (uint64_t)144508864751908U,
-    (uint64_t)173576288079856U, (uint64_t)46114579547054U, (uint64_t)1679480127300211U,
-    (uint64_t)1683062051644007U, (uint64_t)117183826129323U, (uint64_t)1894068608117440U,
-    (uint64_t)3846899838975733U, (uint64_t)4289279019496192U, (uint64_t)176995887914031U,
-    (uint64_t)78074942938713U, (uint64_t)454207263265292U, (uint64_t)972683614054061U,
-    (uint64_t)808474205144361U, (uint64_t)942703935951735U, (uint64_t)134460241077887U,
-    (uint64_t)2104196179349630U, (uint64_t)501632371208418U, (uint64_t)1666838991431177U,
-    (uint64_t)445606193139838U, (uint64_t)73704603396096U, (uint64_t)3140284774064777U,
-    (uint64_t)1356066420820179U, (uint64_t)227054159419281U, (uint64_t)1847611229198687U,
-    (uint64_t)82327838827660U, (uint64_t)3704027573265803U, (uint64_t)1585260489220244U,
-    (uint64_t)4404647914931933U, (uint64_t)2424649827425515U, (uint64_t)206821944206116U,
-    (uint64_t)1508635776287972U, (uint64_t)1933584575629676U, (uint64_t)1903635423783032U,
-    (uint64_t)4193642165165650U, (uint64_t)234321074690644U, (uint64_t)210406774251925U,
-    (uint64_t)1965845668185599U, (uint64_t)3059839433804731U, (uint64_t)1933300510683631U,
-    (uint64_t)150696600689211U, (uint64_t)4069293682158567U, (uint64_t)4346344602660044U,
-    (uint64_t)312200249664561U, (uint64_t)2495020807621840U, (uint64_t)1912707714385U,
-    (uint64_t)299345978159762U, (uint64_t)1164752722686920U, (uint64_t)225322433710338U,
-    (uint64_t)3128747381283759U, (uint64_t)275659067815583U, (uint64_t)1489671057429039U,
-    (uint64_t)1567693343342676U, (uint64_t)921672046098071U, (uint64_t)3707418899384085U,
-    (uint64_t)54646424931593U, (uint64_t)4026733380127147U, (uint64_t)2933435393699231U,
-    (uint64_t)3356593659521967U, (uint64_t)3637750749325529U, (uint64_t)232939412379045U,
-    (uint64_t)2298399636043069U, (uint64_t)270361546063041U, (uint64_t)2523933572551420U,
-    (uint64_t)3456896091572950U, (uint64_t)185447004732850U, (uint64_t)429322937697821U,
-    (uint64_t)2579704215668222U, (uint64_t)695065378803349U, (uint64_t)3987916247731243U,
-    (uint64_t)255159546348233U, (uint64_t)3057777929921282U, (uint64_t)1608970699916312U,
-    (uint64_t)1902369623063807U, (uint64_t)1413619643652777U, (uint64_t)94983996321227U,
-    (uint64_t)2832873179548050U, (uint64_t)4335430233622555U, (uint64_t)1559023976028843U,
-    (uint64_t)3297181988648895U, (uint64_t)100072021232323U, (uint64_t)2124984034109675U,
-    (uint64_t)4501252835618918U, (uint64_t)2053336899483297U, (uint64_t)638807226463876U,
-    (uint64_t)278445213600634U, (uint64_t)2311236445660555U, (uint64_t)303317664040012U,
-    (uint64_t)2659353858089024U, (uint64_t)3598827423980130U, (uint64_t)176059343827873U,
-    (uint64_t)3891639526275437U, (uint64_t)252823982819463U, (uint64_t)3404823300622345U,
-    (uint64_t)2758370772497456U, (uint64_t)91397496598783U, (uint64_t)2248661144141892U,
-    (uint64_t)491087075271969U, (uint64_t)1786344894571315U, (uint64_t)452497694885923U,
-    (uint64_t)34039628873357U, (uint64_t)2116503165025197U, (uint64_t)4436733709429923U,
-    (uint64_t)3045800776819238U, (uint64_t)1385518906078375U, (uint64_t)110495603336764U,
-    (uint64_t)4051447296249587U, (uint64_t)1103557421498625U, (uint64_t)1840785058439622U,
-    (uint64_t)425322753992314U, (uint64_t)98330046771676U, (uint64_t)365407468686431U,
-    (uint64_t)2611246859977123U, (uint64_t)3050253933135339U, (uint64_t)1006482220896688U,
-    (uint64_t)166818196428389U, (uint64_t)3415236093104372U, (uint64_t)1762308883882288U,
-    (uint64_t)1327828123094558U, (uint64_t)3403946425556706U, (uint64_t)96503464455441U,
-    (uint64_t)3893015304031471U, (uint64_t)3740839477490397U, (uint64_t)2411470812852231U,
-    (uint64_t)940927462436211U, (uint64_t)163825285911099U, (uint64_t)1622441495640386U,
-    (uint64_t)850224095680266U, (uint64_t)76199085900939U, (uint64_t)1941852365144042U,
-    (uint64_t)140326673652807U, (uint64_t)3161611011249524U, (uint64_t)317297150009965U,
-    (uint64_t)2145053259340619U, (uint64_t)2180498176457552U, (uint64_t)38457740506224U,
-    (uint64_t)394174899129468U, (uint64_t)2687474560485245U, (uint64_t)1542175980184516U,
-    (uint64_t)1628502671124819U, (uint64_t)48477401124385U, (uint64_t)4474181600025082U,
-    (uint64_t)2142747956365708U, (uint64_t)1638299432475478U, (uint64_t)2005869320353249U,
-    (uint64_t)112292630760956U, (uint64_t)1887521965171588U, (uint64_t)457587531429696U,
-    (uint64_t)840994209504042U, (uint64_t)4268060856325798U, (uint64_t)195597993440388U,
-    (uint64_t)4148484749020338U, (uint64_t)2074885000909672U, (uint64_t)2309839019263165U,
-    (uint64_t)2087616209681024U, (uint64_t)257214370719966U, (uint64_t)2331363508376581U,
-    (uint64_t)1233124357504711U, (uint64_t)2849542202650296U, (uint64_t)3790982825325736U,
-    (uint64_t)13381453503890U, (uint64_t)1665246594531069U, (uint64_t)4165624287443904U,
-    (uint64_t)3418759698027493U, (uint64_t)2118493255117399U, (uint64_t)136249206366067U,
-    (uint64_t)4064050233283309U, (uint64_t)1368779887911300U, (uint64_t)4370550759530269U,
-    (uint64_t)66992990631341U, (uint64_t)84442368922270U, (uint64_t)2139322635321394U,
-    (uint64_t)2076163483726795U, (uint64_t)657097866349103U, (uint64_t)2095579409488071U,
-    (uint64_t)226525774791341U, (uint64_t)4445744257665359U, (uint64_t)2035752839278107U,
-    (uint64_t)1998242662838304U, (uint64_t)1601548415521694U, (uint64_t)151297684296198U,
-    (uint64_t)1350963039017303U, (uint64_t)2624916349548281U, (uint64_t)2018863259670197U,
-    (uint64_t)2717274357461290U, (uint64_t)94024796961533U, (uint64_t)711335520409111U,
-    (uint64_t)4322093765820263U, (uint64_t)2041650358174649U, (uint64_t)3439791603157577U,
-    (uint64_t)179292018616267U, (uint64_t)2436436921286669U, (uint64_t)3905268797208340U,
-    (uint64_t)2829194895162985U, (uint64_t)1355175382191543U, (uint64_t)55128779761539U,
-    (uint64_t)2648428998786922U, (uint64_t)869805912573515U, (uint64_t)3706708942847864U,
-    (uint64_t)2785288916584667U, (uint64_t)37156862850147U, (uint64_t)1422245336293228U,
-    (uint64_t)4497066058933021U, (uint64_t)85588912978349U, (uint64_t)2616252221194611U,
-    (uint64_t)53506393720989U, (uint64_t)3727539190732644U, (uint64_t)872132446545237U,
-    (uint64_t)933583590986077U, (uint64_t)3794591170581203U, (uint64_t)167875550514069U,
-    (uint64_t)2267466834993297U, (uint64_t)3072652681756816U, (uint64_t)2108499037430803U,
-    (uint64_t)1606735192928366U, (uint64_t)72339568815255U, (uint64_t)3258484260684219U,
-    (uint64_t)3277927277719855U, (uint64_t)2459560373011535U, (uint64_t)1672794293294033U,
-    (uint64_t)227460934880669U, (uint64_t)3702454405413705U, (uint64_t)106168148441676U,
-    (uint64_t)1356617643071159U, (uint64_t)3280896569942762U, (uint64_t)142618711614302U,
-    (uint64_t)4291782740862057U, (uint64_t)4141020884874235U, (uint64_t)3720787221267125U,
-    (uint64_t)552884940089351U, (uint64_t)174626154407180U, (uint64_t)972071013326540U,
-    (uint64_t)4458530419931903U, (uint64_t)4435168973822858U, (uint64_t)1902967548748411U,
-    (uint64_t)53007977605840U, (uint64_t)2453997334323925U, (uint64_t)3653077937283262U,
-    (uint64_t)850660265046356U, (uint64_t)312721924805450U, (uint64_t)268503679240683U,
-    (uint64_t)256960167714122U, (uint64_t)1474492507858350U, (uint64_t)2456345526438488U,
-    (uint64_t)3686029507160255U, (uint64_t)279158933010398U, (uint64_t)3646946293948063U,
-    (uint64_t)704477527214036U, (uint64_t)3387744169891031U, (uint64_t)3772622670980241U,
-    (uint64_t)136368897543304U, (uint64_t)3744894052577607U, (uint64_t)1976007214443430U,
-    (uint64_t)2090045379763451U, (uint64_t)968565474458988U, (uint64_t)234295114806066U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    705178180786072ULL, 3855836460717471ULL, 4089131105950716ULL, 3301581525494108ULL,
+    133858670344668ULL, 2199641648059576ULL, 1278080618437060ULL, 3959378566518708ULL,
+    3455034269351872ULL, 79417610544803ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1282049064345544ULL,
+    971732600440099ULL, 1014594595727339ULL, 4392159187541980ULL, 268327875692285ULL,
+    2411661712280539ULL, 1092576199280126ULL, 4328619610718051ULL, 3535440816471627ULL,
+    95182251488556ULL, 1893725512243753ULL, 3619861457111820ULL, 879374960417905ULL,
+    2868056058129113ULL, 273195291893682ULL, 2044797305960112ULL, 2357106853933780ULL,
+    3563112438336058ULL, 2430811541762558ULL, 106443809495428ULL, 2231357633909668ULL,
+    3641705835951936ULL, 80642569314189ULL, 2254841882373268ULL, 149848031966573ULL,
+    2304615661367764ULL, 2410957403736446ULL, 2712754805859804ULL, 2440183877540536ULL,
+    99784623895865ULL, 3667773127482758ULL, 1354899394473308ULL, 3636602998800808ULL,
+    2709296679846364ULL, 7253362091963ULL, 3585950735562744ULL, 935775991758415ULL,
+    4108078106735201ULL, 556081800336307ULL, 229585977163057ULL, 4055594186679801ULL,
+    1767681004944933ULL, 1432634922083242ULL, 534935602949197ULL, 251753159522567ULL,
+    2846474078499321ULL, 4488649590348702ULL, 2437476916025038ULL, 3040577412822874ULL,
+    79405234918614ULL, 3030621226551508ULL, 2801117003929806ULL, 1642927515498422ULL,
+    2802725079726297ULL, 8472780626107ULL, 866068070352655ULL, 188080768545106ULL,
+    2152119998903058ULL, 3391239985029665ULL, 23820026013564ULL, 2965064154891949ULL,
+    1846516097921398ULL, 4418379948133146ULL, 3137755426942400ULL, 47705291301781ULL,
+    4278533051105665ULL, 3453643211214931ULL, 3379734319145156ULL, 3762442192097039ULL,
+    40243003528694ULL, 4063448994211201ULL, 5697015368785ULL, 1006545411838613ULL,
+    4242291693755210ULL, 135184629190512ULL, 264898689131035ULL, 611796474823597ULL,
+    3255382250029089ULL, 3490429246984696ULL, 236558595864362ULL, 2055934691551704ULL,
+    1487711670114502ULL, 1823930698221632ULL, 2130937287438472ULL, 154610053389779ULL,
+    2746573287023216ULL, 2430987262221221ULL, 1668741642878689ULL, 904982541243977ULL,
+    56087343124948ULL, 393905062353536ULL, 412681877350188ULL, 3153602040979977ULL,
+    4466820876224989ULL, 146579165617857ULL, 2628741216508991ULL, 747994231529806ULL,
+    750506569317681ULL, 1887492790748779ULL, 35259008682771ULL, 2085116434894208ULL,
+    543291398921711ULL, 1144362007901552ULL, 679305136036846ULL, 141090902244489ULL,
+    632480954474859ULL, 2384513102652591ULL, 2225529790159790ULL, 692258664851625ULL,
+    198681843567699ULL, 2397092587228181ULL, 145862822166614ULL, 196976540479452ULL,
+    3321831130141455ULL, 69266673089832ULL, 4469644227342284ULL, 3899271145504796ULL,
+    1261890974076660ULL, 525357673886694ULL, 182135997828583ULL, 4292760618810332ULL,
+    3404186545541683ULL, 312297386688768ULL, 204377466824608ULL, 230900767857952ULL,
+    3871485172339693ULL, 779449329662955ULL, 978655822464694ULL, 2278252139594027ULL,
+    104641527040382ULL, 3528840153625765ULL, 4484699080275273ULL, 1463971951102316ULL,
+    4013910812844749ULL, 228915589433620ULL, 1209641433482461ULL, 4043178788774759ULL,
+    3008668238856634ULL, 1448425089071412ULL, 26269719725037ULL, 3330785027545223ULL,
+    852657975349259ULL, 227245054466105ULL, 1534632353984777ULL, 207715098574660ULL,
+    3209837527352280ULL, 4051688046309066ULL, 3839009590725955ULL, 1321506437398842ULL,
+    68340219159928ULL, 1806950276956275ULL, 3923908055275295ULL, 743963253393575ULL,
+    42162407478783ULL, 261334584474610ULL, 3728224928885214ULL, 4004701081842869ULL,
+    709043201644674ULL, 4267294249150171ULL, 255540582975025ULL, 875490593722211ULL,
+    796393708218375ULL, 14774425627956ULL, 1500040516752097ULL, 141076627721678ULL,
+    2634539368480628ULL, 1106488853550103ULL, 2346231921151930ULL, 897108283954283ULL,
+    64616679559843ULL, 400244949840943ULL, 1731263826831733ULL, 1649996579904651ULL,
+    3643693449640761ULL, 172543068638991ULL, 329537981097182ULL, 2029799860802869ULL,
+    4377737515208862ULL, 29103311051334ULL, 265583594111499ULL, 3798074876561255ULL,
+    184749333259352ULL, 3117395073661801ULL, 3695784565008833ULL, 64282709896721ULL,
+    1618968913246422ULL, 3185235128095257ULL, 3288745068118692ULL, 1963818603508782ULL,
+    281054350739495ULL, 1658639050810346ULL, 3061097601679552ULL, 3023781433263746ULL,
+    2770283391242475ULL, 144508864751908ULL, 173576288079856ULL, 46114579547054ULL,
+    1679480127300211ULL, 1683062051644007ULL, 117183826129323ULL, 1894068608117440ULL,
+    3846899838975733ULL, 4289279019496192ULL, 176995887914031ULL, 78074942938713ULL,
+    454207263265292ULL, 972683614054061ULL, 808474205144361ULL, 942703935951735ULL,
+    134460241077887ULL, 2104196179349630ULL, 501632371208418ULL, 1666838991431177ULL,
+    445606193139838ULL, 73704603396096ULL, 3140284774064777ULL, 1356066420820179ULL,
+    227054159419281ULL, 1847611229198687ULL, 82327838827660ULL, 3704027573265803ULL,
+    1585260489220244ULL, 4404647914931933ULL, 2424649827425515ULL, 206821944206116ULL,
+    1508635776287972ULL, 1933584575629676ULL, 1903635423783032ULL, 4193642165165650ULL,
+    234321074690644ULL, 210406774251925ULL, 1965845668185599ULL, 3059839433804731ULL,
+    1933300510683631ULL, 150696600689211ULL, 4069293682158567ULL, 4346344602660044ULL,
+    312200249664561ULL, 2495020807621840ULL, 1912707714385ULL, 299345978159762ULL,
+    1164752722686920ULL, 225322433710338ULL, 3128747381283759ULL, 275659067815583ULL,
+    1489671057429039ULL, 1567693343342676ULL, 921672046098071ULL, 3707418899384085ULL,
+    54646424931593ULL, 4026733380127147ULL, 2933435393699231ULL, 3356593659521967ULL,
+    3637750749325529ULL, 232939412379045ULL, 2298399636043069ULL, 270361546063041ULL,
+    2523933572551420ULL, 3456896091572950ULL, 185447004732850ULL, 429322937697821ULL,
+    2579704215668222ULL, 695065378803349ULL, 3987916247731243ULL, 255159546348233ULL,
+    3057777929921282ULL, 1608970699916312ULL, 1902369623063807ULL, 1413619643652777ULL,
+    94983996321227ULL, 2832873179548050ULL, 4335430233622555ULL, 1559023976028843ULL,
+    3297181988648895ULL, 100072021232323ULL, 2124984034109675ULL, 4501252835618918ULL,
+    2053336899483297ULL, 638807226463876ULL, 278445213600634ULL, 2311236445660555ULL,
+    303317664040012ULL, 2659353858089024ULL, 3598827423980130ULL, 176059343827873ULL,
+    3891639526275437ULL, 252823982819463ULL, 3404823300622345ULL, 2758370772497456ULL,
+    91397496598783ULL, 2248661144141892ULL, 491087075271969ULL, 1786344894571315ULL,
+    452497694885923ULL, 34039628873357ULL, 2116503165025197ULL, 4436733709429923ULL,
+    3045800776819238ULL, 1385518906078375ULL, 110495603336764ULL, 4051447296249587ULL,
+    1103557421498625ULL, 1840785058439622ULL, 425322753992314ULL, 98330046771676ULL,
+    365407468686431ULL, 2611246859977123ULL, 3050253933135339ULL, 1006482220896688ULL,
+    166818196428389ULL, 3415236093104372ULL, 1762308883882288ULL, 1327828123094558ULL,
+    3403946425556706ULL, 96503464455441ULL, 3893015304031471ULL, 3740839477490397ULL,
+    2411470812852231ULL, 940927462436211ULL, 163825285911099ULL, 1622441495640386ULL,
+    850224095680266ULL, 76199085900939ULL, 1941852365144042ULL, 140326673652807ULL,
+    3161611011249524ULL, 317297150009965ULL, 2145053259340619ULL, 2180498176457552ULL,
+    38457740506224ULL, 394174899129468ULL, 2687474560485245ULL, 1542175980184516ULL,
+    1628502671124819ULL, 48477401124385ULL, 4474181600025082ULL, 2142747956365708ULL,
+    1638299432475478ULL, 2005869320353249ULL, 112292630760956ULL, 1887521965171588ULL,
+    457587531429696ULL, 840994209504042ULL, 4268060856325798ULL, 195597993440388ULL,
+    4148484749020338ULL, 2074885000909672ULL, 2309839019263165ULL, 2087616209681024ULL,
+    257214370719966ULL, 2331363508376581ULL, 1233124357504711ULL, 2849542202650296ULL,
+    3790982825325736ULL, 13381453503890ULL, 1665246594531069ULL, 4165624287443904ULL,
+    3418759698027493ULL, 2118493255117399ULL, 136249206366067ULL, 4064050233283309ULL,
+    1368779887911300ULL, 4370550759530269ULL, 66992990631341ULL, 84442368922270ULL,
+    2139322635321394ULL, 2076163483726795ULL, 657097866349103ULL, 2095579409488071ULL,
+    226525774791341ULL, 4445744257665359ULL, 2035752839278107ULL, 1998242662838304ULL,
+    1601548415521694ULL, 151297684296198ULL, 1350963039017303ULL, 2624916349548281ULL,
+    2018863259670197ULL, 2717274357461290ULL, 94024796961533ULL, 711335520409111ULL,
+    4322093765820263ULL, 2041650358174649ULL, 3439791603157577ULL, 179292018616267ULL,
+    2436436921286669ULL, 3905268797208340ULL, 2829194895162985ULL, 1355175382191543ULL,
+    55128779761539ULL, 2648428998786922ULL, 869805912573515ULL, 3706708942847864ULL,
+    2785288916584667ULL, 37156862850147ULL, 1422245336293228ULL, 4497066058933021ULL,
+    85588912978349ULL, 2616252221194611ULL, 53506393720989ULL, 3727539190732644ULL,
+    872132446545237ULL, 933583590986077ULL, 3794591170581203ULL, 167875550514069ULL,
+    2267466834993297ULL, 3072652681756816ULL, 2108499037430803ULL, 1606735192928366ULL,
+    72339568815255ULL, 3258484260684219ULL, 3277927277719855ULL, 2459560373011535ULL,
+    1672794293294033ULL, 227460934880669ULL, 3702454405413705ULL, 106168148441676ULL,
+    1356617643071159ULL, 3280896569942762ULL, 142618711614302ULL, 4291782740862057ULL,
+    4141020884874235ULL, 3720787221267125ULL, 552884940089351ULL, 174626154407180ULL,
+    972071013326540ULL, 4458530419931903ULL, 4435168973822858ULL, 1902967548748411ULL,
+    53007977605840ULL, 2453997334323925ULL, 3653077937283262ULL, 850660265046356ULL,
+    312721924805450ULL, 268503679240683ULL, 256960167714122ULL, 1474492507858350ULL,
+    2456345526438488ULL, 3686029507160255ULL, 279158933010398ULL, 3646946293948063ULL,
+    704477527214036ULL, 3387744169891031ULL, 3772622670980241ULL, 136368897543304ULL,
+    3744894052577607ULL, 1976007214443430ULL, 2090045379763451ULL, 968565474458988ULL,
+    234295114806066ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/msvc/internal/Hacl_P256_PrecompTable.h b/include/msvc/internal/Hacl_P256_PrecompTable.h
index f185c2be..c852ef8c 100644
--- a/include/msvc/internal/Hacl_P256_PrecompTable.h
+++ b/include/msvc/internal/Hacl_P256_PrecompTable.h
@@ -39,476 +39,360 @@ static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_basepoint_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)8784043285714375740U,
-    (uint64_t)8483257759279461889U, (uint64_t)8789745728267363600U, (uint64_t)1770019616739251654U,
-    (uint64_t)15992936863339206154U, (uint64_t)10037038012062884956U,
-    (uint64_t)15197544864945402661U, (uint64_t)9615747158586711429U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)10634854829044225757U, (uint64_t)351552716085025155U, (uint64_t)10645315080955407736U,
-    (uint64_t)3609262091244858135U, (uint64_t)15760741698986874125U,
-    (uint64_t)14936374388219697827U, (uint64_t)15751360096993017895U,
-    (uint64_t)18012233706239762398U, (uint64_t)1993877568177495041U,
-    (uint64_t)10345888787846536528U, (uint64_t)7746511691117935375U,
-    (uint64_t)14517043990409914413U, (uint64_t)14122549297570634151U,
-    (uint64_t)16934610359517083771U, (uint64_t)5724511325497097418U, (uint64_t)8983432969107448705U,
-    (uint64_t)2687429970334080245U, (uint64_t)16525396802810050288U, (uint64_t)7602596488871585854U,
-    (uint64_t)4813919589149203084U, (uint64_t)7680395813780804519U, (uint64_t)6687709583048023590U,
-    (uint64_t)18086445169104142027U, (uint64_t)9637814708330203929U,
-    (uint64_t)14785108459960679090U, (uint64_t)3838023279095023581U, (uint64_t)3555615526157830307U,
-    (uint64_t)5177066488380472871U, (uint64_t)18218186719108038403U,
-    (uint64_t)16281556341699656105U, (uint64_t)1524227924561461191U, (uint64_t)4148060517641909597U,
-    (uint64_t)2858290374115363433U, (uint64_t)8942772026334130620U, (uint64_t)3034451298319885113U,
-    (uint64_t)8447866036736640940U, (uint64_t)11204933433076256578U,
-    (uint64_t)18333595740249588297U, (uint64_t)8259597024804538246U, (uint64_t)9539734295777539786U,
-    (uint64_t)9797290423046626413U, (uint64_t)5777303437849646537U, (uint64_t)8739356909899132020U,
-    (uint64_t)14815960973766782158U, (uint64_t)15286581798204509801U,
-    (uint64_t)17597362577777019682U, (uint64_t)13259283710820519742U,
-    (uint64_t)10501322996899164670U, (uint64_t)1221138904338319642U,
-    (uint64_t)14586685489551951885U, (uint64_t)895326705426031212U, (uint64_t)14398171728560617847U,
-    (uint64_t)9592550823745097391U, (uint64_t)17240998489162206026U, (uint64_t)8085479283308189196U,
-    (uint64_t)14844657737893882826U, (uint64_t)15923425394150618234U,
-    (uint64_t)2997808084773249525U, (uint64_t)494323555453660587U, (uint64_t)1215695327517794764U,
-    (uint64_t)9476207381098391690U, (uint64_t)7480789678419122995U, (uint64_t)15212230329321082489U,
-    (uint64_t)436189395349576388U, (uint64_t)17377474396456660834U, (uint64_t)15237013929655017939U,
-    (uint64_t)11444428846883781676U, (uint64_t)5112749694521428575U, (uint64_t)950829367509872073U,
-    (uint64_t)17665036182057559519U, (uint64_t)17205133339690002313U,
-    (uint64_t)16233765170251334549U, (uint64_t)10122775683257972591U,
-    (uint64_t)3352514236455632420U, (uint64_t)9143148522359954691U, (uint64_t)601191684005658860U,
-    (uint64_t)13398772186646349998U, (uint64_t)15512696600132928431U,
-    (uint64_t)9128416073728948653U, (uint64_t)11233051033546138578U, (uint64_t)6769345682610122833U,
-    (uint64_t)10823233224575054288U, (uint64_t)9997725227559980175U, (uint64_t)6733425642852897415U,
-    (uint64_t)16302206918151466066U, (uint64_t)1669330822143265921U, (uint64_t)2661645605036546002U,
-    (uint64_t)17182558479745802165U, (uint64_t)1165082692376932040U, (uint64_t)9470595929011488359U,
-    (uint64_t)6142147329285324932U, (uint64_t)4829075085998111287U, (uint64_t)10231370681107338930U,
-    (uint64_t)9591876895322495239U, (uint64_t)10316468561384076618U,
-    (uint64_t)11592503647238064235U, (uint64_t)13395813606055179632U, (uint64_t)511127033980815508U,
-    (uint64_t)12434976573147649880U, (uint64_t)3425094795384359127U, (uint64_t)6816971736303023445U,
-    (uint64_t)15444670609021139344U, (uint64_t)9464349818322082360U,
-    (uint64_t)16178216413042376883U, (uint64_t)9595540370774317348U, (uint64_t)7229365182662875710U,
-    (uint64_t)4601177649460012843U, (uint64_t)5455046447382487090U, (uint64_t)10854066421606187521U,
-    (uint64_t)15913416821879788071U, (uint64_t)2297365362023460173U, (uint64_t)2603252216454941350U,
-    (uint64_t)6768791943870490934U, (uint64_t)15705936687122754810U, (uint64_t)9537096567546600694U,
-    (uint64_t)17580538144855035062U, (uint64_t)4496542856965746638U, (uint64_t)8444341625922124942U,
-    (uint64_t)12191263903636183168U, (uint64_t)17427332907535974165U,
-    (uint64_t)14307569739254103736U, (uint64_t)13900598742063266169U,
-    (uint64_t)7176996424355977650U, (uint64_t)5709008170379717479U, (uint64_t)14471312052264549092U,
-    (uint64_t)1464519909491759867U, (uint64_t)3328154641049602121U, (uint64_t)13020349337171136774U,
-    (uint64_t)2772166279972051938U, (uint64_t)10854476939425975292U, (uint64_t)1967189930534630940U,
-    (uint64_t)2802919076529341959U, (uint64_t)14792226094833519208U,
-    (uint64_t)14675640928566522177U, (uint64_t)14838974364643800837U,
-    (uint64_t)17631460696099549980U, (uint64_t)17434186275364935469U,
-    (uint64_t)2665648200587705473U, (uint64_t)13202122464492564051U, (uint64_t)7576287350918073341U,
-    (uint64_t)2272206013910186424U, (uint64_t)14558761641743937843U, (uint64_t)5675729149929979729U,
-    (uint64_t)9043135187561613166U, (uint64_t)11750149293830589225U, (uint64_t)740555197954307911U,
-    (uint64_t)9871738005087190699U, (uint64_t)17178667634283502053U,
-    (uint64_t)18046255991533013265U, (uint64_t)4458222096988430430U, (uint64_t)8452427758526311627U,
-    (uint64_t)13825286929656615266U, (uint64_t)13956286357198391218U,
-    (uint64_t)15875692916799995079U, (uint64_t)10634895319157013920U,
-    (uint64_t)13230116118036304207U, (uint64_t)8795317393614625606U, (uint64_t)7001710806858862020U,
-    (uint64_t)7949746088586183478U, (uint64_t)14677556044923602317U,
-    (uint64_t)11184023437485843904U, (uint64_t)11215864722023085094U,
-    (uint64_t)6444464081471519014U, (uint64_t)1706241174022415217U, (uint64_t)8243975633057550613U,
-    (uint64_t)15502902453836085864U, (uint64_t)3799182188594003953U, (uint64_t)3538840175098724094U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 8784043285714375740ULL, 8483257759279461889ULL, 8789745728267363600ULL,
+    1770019616739251654ULL, 15992936863339206154ULL, 10037038012062884956ULL,
+    15197544864945402661ULL, 9615747158586711429ULL, 1ULL, 18446744069414584320ULL,
+    18446744073709551615ULL, 4294967294ULL, 10634854829044225757ULL, 351552716085025155ULL,
+    10645315080955407736ULL, 3609262091244858135ULL, 15760741698986874125ULL,
+    14936374388219697827ULL, 15751360096993017895ULL, 18012233706239762398ULL,
+    1993877568177495041ULL, 10345888787846536528ULL, 7746511691117935375ULL,
+    14517043990409914413ULL, 14122549297570634151ULL, 16934610359517083771ULL,
+    5724511325497097418ULL, 8983432969107448705ULL, 2687429970334080245ULL, 16525396802810050288ULL,
+    7602596488871585854ULL, 4813919589149203084ULL, 7680395813780804519ULL, 6687709583048023590ULL,
+    18086445169104142027ULL, 9637814708330203929ULL, 14785108459960679090ULL,
+    3838023279095023581ULL, 3555615526157830307ULL, 5177066488380472871ULL, 18218186719108038403ULL,
+    16281556341699656105ULL, 1524227924561461191ULL, 4148060517641909597ULL, 2858290374115363433ULL,
+    8942772026334130620ULL, 3034451298319885113ULL, 8447866036736640940ULL, 11204933433076256578ULL,
+    18333595740249588297ULL, 8259597024804538246ULL, 9539734295777539786ULL, 9797290423046626413ULL,
+    5777303437849646537ULL, 8739356909899132020ULL, 14815960973766782158ULL,
+    15286581798204509801ULL, 17597362577777019682ULL, 13259283710820519742ULL,
+    10501322996899164670ULL, 1221138904338319642ULL, 14586685489551951885ULL, 895326705426031212ULL,
+    14398171728560617847ULL, 9592550823745097391ULL, 17240998489162206026ULL,
+    8085479283308189196ULL, 14844657737893882826ULL, 15923425394150618234ULL,
+    2997808084773249525ULL, 494323555453660587ULL, 1215695327517794764ULL, 9476207381098391690ULL,
+    7480789678419122995ULL, 15212230329321082489ULL, 436189395349576388ULL, 17377474396456660834ULL,
+    15237013929655017939ULL, 11444428846883781676ULL, 5112749694521428575ULL, 950829367509872073ULL,
+    17665036182057559519ULL, 17205133339690002313ULL, 16233765170251334549ULL,
+    10122775683257972591ULL, 3352514236455632420ULL, 9143148522359954691ULL, 601191684005658860ULL,
+    13398772186646349998ULL, 15512696600132928431ULL, 9128416073728948653ULL,
+    11233051033546138578ULL, 6769345682610122833ULL, 10823233224575054288ULL,
+    9997725227559980175ULL, 6733425642852897415ULL, 16302206918151466066ULL, 1669330822143265921ULL,
+    2661645605036546002ULL, 17182558479745802165ULL, 1165082692376932040ULL, 9470595929011488359ULL,
+    6142147329285324932ULL, 4829075085998111287ULL, 10231370681107338930ULL, 9591876895322495239ULL,
+    10316468561384076618ULL, 11592503647238064235ULL, 13395813606055179632ULL,
+    511127033980815508ULL, 12434976573147649880ULL, 3425094795384359127ULL, 6816971736303023445ULL,
+    15444670609021139344ULL, 9464349818322082360ULL, 16178216413042376883ULL,
+    9595540370774317348ULL, 7229365182662875710ULL, 4601177649460012843ULL, 5455046447382487090ULL,
+    10854066421606187521ULL, 15913416821879788071ULL, 2297365362023460173ULL,
+    2603252216454941350ULL, 6768791943870490934ULL, 15705936687122754810ULL, 9537096567546600694ULL,
+    17580538144855035062ULL, 4496542856965746638ULL, 8444341625922124942ULL,
+    12191263903636183168ULL, 17427332907535974165ULL, 14307569739254103736ULL,
+    13900598742063266169ULL, 7176996424355977650ULL, 5709008170379717479ULL,
+    14471312052264549092ULL, 1464519909491759867ULL, 3328154641049602121ULL,
+    13020349337171136774ULL, 2772166279972051938ULL, 10854476939425975292ULL,
+    1967189930534630940ULL, 2802919076529341959ULL, 14792226094833519208ULL,
+    14675640928566522177ULL, 14838974364643800837ULL, 17631460696099549980ULL,
+    17434186275364935469ULL, 2665648200587705473ULL, 13202122464492564051ULL,
+    7576287350918073341ULL, 2272206013910186424ULL, 14558761641743937843ULL, 5675729149929979729ULL,
+    9043135187561613166ULL, 11750149293830589225ULL, 740555197954307911ULL, 9871738005087190699ULL,
+    17178667634283502053ULL, 18046255991533013265ULL, 4458222096988430430ULL,
+    8452427758526311627ULL, 13825286929656615266ULL, 13956286357198391218ULL,
+    15875692916799995079ULL, 10634895319157013920ULL, 13230116118036304207ULL,
+    8795317393614625606ULL, 7001710806858862020ULL, 7949746088586183478ULL, 14677556044923602317ULL,
+    11184023437485843904ULL, 11215864722023085094ULL, 6444464081471519014ULL,
+    1706241174022415217ULL, 8243975633057550613ULL, 15502902453836085864ULL, 3799182188594003953ULL,
+    3538840175098724094ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1499621593102562565U,
-    (uint64_t)16692369783039433128U, (uint64_t)15337520135922861848U,
-    (uint64_t)5455737214495366228U, (uint64_t)17827017231032529600U,
-    (uint64_t)12413621606240782649U, (uint64_t)2290483008028286132U,
-    (uint64_t)15752017553340844820U, (uint64_t)4846430910634234874U,
-    (uint64_t)10861682798464583253U, (uint64_t)15404737222404363049U, (uint64_t)363586619281562022U,
-    (uint64_t)9866710912401645115U, (uint64_t)1162548847543228595U, (uint64_t)7649967190445130486U,
-    (uint64_t)5212340432230915749U, (uint64_t)7572620550182916491U, (uint64_t)14876145112448665096U,
-    (uint64_t)2063227348838176167U, (uint64_t)3519435548295415847U, (uint64_t)8390400282019023103U,
-    (uint64_t)17666843593163037841U, (uint64_t)9450204148816496323U, (uint64_t)8483374507652916768U,
-    (uint64_t)6254661047265818424U, (uint64_t)16382127809582285023U, (uint64_t)125359443771153172U,
-    (uint64_t)1374336701588437897U, (uint64_t)11362596098420127726U, (uint64_t)2101654420738681387U,
-    (uint64_t)12772780342444840510U, (uint64_t)12546934328908550060U,
-    (uint64_t)8331880412333790397U, (uint64_t)11687262051473819904U, (uint64_t)8926848496503457587U,
-    (uint64_t)9603974142010467857U, (uint64_t)13199952163826973175U, (uint64_t)2189856264898797734U,
-    (uint64_t)11356074861870267226U, (uint64_t)2027714896422561895U, (uint64_t)5261606367808050149U,
-    (uint64_t)153855954337762312U, (uint64_t)6375919692894573986U, (uint64_t)12364041207536146533U,
-    (uint64_t)1891896010455057160U, (uint64_t)1568123795087313171U, (uint64_t)18138710056556660101U,
-    (uint64_t)6004886947510047736U, (uint64_t)4811859325589542932U, (uint64_t)3618763430148954981U,
-    (uint64_t)11434521746258554122U, (uint64_t)10086341535864049427U,
-    (uint64_t)8073421629570399570U, (uint64_t)12680586148814729338U, (uint64_t)9619958020761569612U,
-    (uint64_t)15827203580658384478U, (uint64_t)12832694810937550406U,
-    (uint64_t)14977975484447400910U, (uint64_t)5478002389061063653U,
-    (uint64_t)14731136312639060880U, (uint64_t)4317867687275472033U, (uint64_t)6642650962855259884U,
-    (uint64_t)2514254944289495285U, (uint64_t)14231405641534478436U, (uint64_t)4045448346091518946U,
-    (uint64_t)8985477013445972471U, (uint64_t)8869039454457032149U, (uint64_t)4356978486208692970U,
-    (uint64_t)10805288613335538577U, (uint64_t)12832353127812502042U,
-    (uint64_t)4576590051676547490U, (uint64_t)6728053735138655107U, (uint64_t)17814206719173206184U,
-    (uint64_t)79790138573994940U, (uint64_t)17920293215101822267U, (uint64_t)13422026625585728864U,
-    (uint64_t)5018058010492547271U, (uint64_t)110232326023384102U, (uint64_t)10834264070056942976U,
-    (uint64_t)15222249086119088588U, (uint64_t)15119439519142044997U,
-    (uint64_t)11655511970063167313U, (uint64_t)1614477029450566107U, (uint64_t)3619322817271059794U,
-    (uint64_t)9352862040415412867U, (uint64_t)14017522553242747074U,
-    (uint64_t)13138513643674040327U, (uint64_t)3610195242889455765U, (uint64_t)8371069193996567291U,
-    (uint64_t)12670227996544662654U, (uint64_t)1205961025092146303U,
-    (uint64_t)13106709934003962112U, (uint64_t)4350113471327723407U,
-    (uint64_t)15060941403739680459U, (uint64_t)13639127647823205030U,
-    (uint64_t)10790943339357725715U, (uint64_t)498760574280648264U, (uint64_t)17922071907832082887U,
-    (uint64_t)15122670976670152145U, (uint64_t)6275027991110214322U, (uint64_t)7250912847491816402U,
-    (uint64_t)15206617260142982380U, (uint64_t)3385668313694152877U,
-    (uint64_t)17522479771766801905U, (uint64_t)2965919117476170655U, (uint64_t)1553238516603269404U,
-    (uint64_t)5820770015631050991U, (uint64_t)4999445222232605348U, (uint64_t)9245650860833717444U,
-    (uint64_t)1508811811724230728U, (uint64_t)5190684913765614385U, (uint64_t)15692927070934536166U,
-    (uint64_t)12981978499190500902U, (uint64_t)5143491963193394698U, (uint64_t)7705698092144084129U,
-    (uint64_t)581120653055084783U, (uint64_t)13886552864486459714U, (uint64_t)6290301270652587255U,
-    (uint64_t)8663431529954393128U, (uint64_t)17033405846475472443U, (uint64_t)5206780355442651635U,
-    (uint64_t)12580364474736467688U, (uint64_t)17934601912005283310U,
-    (uint64_t)15119491731028933652U, (uint64_t)17848231399859044858U,
-    (uint64_t)4427673319524919329U, (uint64_t)2673607337074368008U, (uint64_t)14034876464294699949U,
-    (uint64_t)10938948975420813697U, (uint64_t)15202340615298669183U,
-    (uint64_t)5496603454069431071U, (uint64_t)2486526142064906845U, (uint64_t)4507882119510526802U,
-    (uint64_t)13888151172411390059U, (uint64_t)15049027856908071726U,
-    (uint64_t)9667231543181973158U, (uint64_t)6406671575277563202U, (uint64_t)3395801050331215139U,
-    (uint64_t)9813607433539108308U, (uint64_t)2681417728820980381U, (uint64_t)18407064643927113994U,
-    (uint64_t)7707177692113485527U, (uint64_t)14218149384635317074U, (uint64_t)3658668346206375919U,
-    (uint64_t)15404713991002362166U, (uint64_t)10152074687696195207U,
-    (uint64_t)10926946599582128139U, (uint64_t)16907298600007085320U,
-    (uint64_t)16544287219664720279U, (uint64_t)11007075933432813205U,
-    (uint64_t)8652245965145713599U, (uint64_t)7857626748965990384U, (uint64_t)5602306604520095870U,
-    (uint64_t)2525139243938658618U, (uint64_t)14405696176872077447U,
-    (uint64_t)18432270482137885332U, (uint64_t)9913880809120071177U,
-    (uint64_t)16896141737831216972U, (uint64_t)7484791498211214829U,
-    (uint64_t)15635259968266497469U, (uint64_t)8495118537612215624U, (uint64_t)4915477980562575356U,
-    (uint64_t)16453519279754924350U, (uint64_t)14462108244565406969U,
-    (uint64_t)14837837755237096687U, (uint64_t)14130171078892575346U,
-    (uint64_t)15423793222528491497U, (uint64_t)5460399262075036084U,
-    (uint64_t)16085440580308415349U, (uint64_t)26873200736954488U, (uint64_t)5603655807457499550U,
-    (uint64_t)3342202915871129617U, (uint64_t)1604413932150236626U, (uint64_t)9684226585089458974U,
-    (uint64_t)1213229904006618539U, (uint64_t)6782978662408837236U, (uint64_t)11197029877749307372U,
-    (uint64_t)14085968786551657744U, (uint64_t)17352273610494009342U,
-    (uint64_t)7876582961192434984U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1499621593102562565ULL, 16692369783039433128ULL,
+    15337520135922861848ULL, 5455737214495366228ULL, 17827017231032529600ULL,
+    12413621606240782649ULL, 2290483008028286132ULL, 15752017553340844820ULL,
+    4846430910634234874ULL, 10861682798464583253ULL, 15404737222404363049ULL, 363586619281562022ULL,
+    9866710912401645115ULL, 1162548847543228595ULL, 7649967190445130486ULL, 5212340432230915749ULL,
+    7572620550182916491ULL, 14876145112448665096ULL, 2063227348838176167ULL, 3519435548295415847ULL,
+    8390400282019023103ULL, 17666843593163037841ULL, 9450204148816496323ULL, 8483374507652916768ULL,
+    6254661047265818424ULL, 16382127809582285023ULL, 125359443771153172ULL, 1374336701588437897ULL,
+    11362596098420127726ULL, 2101654420738681387ULL, 12772780342444840510ULL,
+    12546934328908550060ULL, 8331880412333790397ULL, 11687262051473819904ULL,
+    8926848496503457587ULL, 9603974142010467857ULL, 13199952163826973175ULL, 2189856264898797734ULL,
+    11356074861870267226ULL, 2027714896422561895ULL, 5261606367808050149ULL, 153855954337762312ULL,
+    6375919692894573986ULL, 12364041207536146533ULL, 1891896010455057160ULL, 1568123795087313171ULL,
+    18138710056556660101ULL, 6004886947510047736ULL, 4811859325589542932ULL, 3618763430148954981ULL,
+    11434521746258554122ULL, 10086341535864049427ULL, 8073421629570399570ULL,
+    12680586148814729338ULL, 9619958020761569612ULL, 15827203580658384478ULL,
+    12832694810937550406ULL, 14977975484447400910ULL, 5478002389061063653ULL,
+    14731136312639060880ULL, 4317867687275472033ULL, 6642650962855259884ULL, 2514254944289495285ULL,
+    14231405641534478436ULL, 4045448346091518946ULL, 8985477013445972471ULL, 8869039454457032149ULL,
+    4356978486208692970ULL, 10805288613335538577ULL, 12832353127812502042ULL,
+    4576590051676547490ULL, 6728053735138655107ULL, 17814206719173206184ULL, 79790138573994940ULL,
+    17920293215101822267ULL, 13422026625585728864ULL, 5018058010492547271ULL, 110232326023384102ULL,
+    10834264070056942976ULL, 15222249086119088588ULL, 15119439519142044997ULL,
+    11655511970063167313ULL, 1614477029450566107ULL, 3619322817271059794ULL, 9352862040415412867ULL,
+    14017522553242747074ULL, 13138513643674040327ULL, 3610195242889455765ULL,
+    8371069193996567291ULL, 12670227996544662654ULL, 1205961025092146303ULL,
+    13106709934003962112ULL, 4350113471327723407ULL, 15060941403739680459ULL,
+    13639127647823205030ULL, 10790943339357725715ULL, 498760574280648264ULL,
+    17922071907832082887ULL, 15122670976670152145ULL, 6275027991110214322ULL,
+    7250912847491816402ULL, 15206617260142982380ULL, 3385668313694152877ULL,
+    17522479771766801905ULL, 2965919117476170655ULL, 1553238516603269404ULL, 5820770015631050991ULL,
+    4999445222232605348ULL, 9245650860833717444ULL, 1508811811724230728ULL, 5190684913765614385ULL,
+    15692927070934536166ULL, 12981978499190500902ULL, 5143491963193394698ULL,
+    7705698092144084129ULL, 581120653055084783ULL, 13886552864486459714ULL, 6290301270652587255ULL,
+    8663431529954393128ULL, 17033405846475472443ULL, 5206780355442651635ULL,
+    12580364474736467688ULL, 17934601912005283310ULL, 15119491731028933652ULL,
+    17848231399859044858ULL, 4427673319524919329ULL, 2673607337074368008ULL,
+    14034876464294699949ULL, 10938948975420813697ULL, 15202340615298669183ULL,
+    5496603454069431071ULL, 2486526142064906845ULL, 4507882119510526802ULL, 13888151172411390059ULL,
+    15049027856908071726ULL, 9667231543181973158ULL, 6406671575277563202ULL, 3395801050331215139ULL,
+    9813607433539108308ULL, 2681417728820980381ULL, 18407064643927113994ULL, 7707177692113485527ULL,
+    14218149384635317074ULL, 3658668346206375919ULL, 15404713991002362166ULL,
+    10152074687696195207ULL, 10926946599582128139ULL, 16907298600007085320ULL,
+    16544287219664720279ULL, 11007075933432813205ULL, 8652245965145713599ULL,
+    7857626748965990384ULL, 5602306604520095870ULL, 2525139243938658618ULL, 14405696176872077447ULL,
+    18432270482137885332ULL, 9913880809120071177ULL, 16896141737831216972ULL,
+    7484791498211214829ULL, 15635259968266497469ULL, 8495118537612215624ULL, 4915477980562575356ULL,
+    16453519279754924350ULL, 14462108244565406969ULL, 14837837755237096687ULL,
+    14130171078892575346ULL, 15423793222528491497ULL, 5460399262075036084ULL,
+    16085440580308415349ULL, 26873200736954488ULL, 5603655807457499550ULL, 3342202915871129617ULL,
+    1604413932150236626ULL, 9684226585089458974ULL, 1213229904006618539ULL, 6782978662408837236ULL,
+    11197029877749307372ULL, 14085968786551657744ULL, 17352273610494009342ULL,
+    7876582961192434984ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)14619254753077084366U,
-    (uint64_t)13913835116514008593U, (uint64_t)15060744674088488145U,
-    (uint64_t)17668414598203068685U, (uint64_t)10761169236902342334U,
-    (uint64_t)15467027479157446221U, (uint64_t)14989185522423469618U,
-    (uint64_t)14354539272510107003U, (uint64_t)14298211796392133693U,
-    (uint64_t)13270323784253711450U, (uint64_t)13380964971965046957U,
-    (uint64_t)8686204248456909699U, (uint64_t)17434630286744937066U, (uint64_t)1355903775279084720U,
-    (uint64_t)7554695053550308662U, (uint64_t)11354971222741863570U, (uint64_t)564601613420749879U,
-    (uint64_t)8466325837259054896U, (uint64_t)10752965181772434263U,
-    (uint64_t)11405876547368426319U, (uint64_t)13791894568738930940U,
-    (uint64_t)8230587134406354675U, (uint64_t)12415514098722758608U,
-    (uint64_t)18414183046995786744U, (uint64_t)15508000368227372870U,
-    (uint64_t)5781062464627999307U, (uint64_t)15339429052219195590U,
-    (uint64_t)16038703753810741903U, (uint64_t)9587718938298980714U, (uint64_t)4822658817952386407U,
-    (uint64_t)1376351024833260660U, (uint64_t)1120174910554766702U, (uint64_t)1730170933262569274U,
-    (uint64_t)5187428548444533500U, (uint64_t)16242053503368957131U, (uint64_t)3036811119519868279U,
-    (uint64_t)1760267587958926638U, (uint64_t)170244572981065185U, (uint64_t)8063080791967388171U,
-    (uint64_t)4824892826607692737U, (uint64_t)16286391083472040552U,
-    (uint64_t)11945158615253358747U, (uint64_t)14096887760410224200U,
-    (uint64_t)1613720831904557039U, (uint64_t)14316966673761197523U,
-    (uint64_t)17411006201485445341U, (uint64_t)8112301506943158801U, (uint64_t)2069889233927989984U,
-    (uint64_t)10082848378277483927U, (uint64_t)3609691194454404430U, (uint64_t)6110437205371933689U,
-    (uint64_t)9769135977342231601U, (uint64_t)11977962151783386478U,
-    (uint64_t)18088718692559983573U, (uint64_t)11741637975753055U, (uint64_t)11110390325701582190U,
-    (uint64_t)1341402251566067019U, (uint64_t)3028229550849726478U, (uint64_t)10438984083997451310U,
-    (uint64_t)12730851885100145709U, (uint64_t)11524169532089894189U,
-    (uint64_t)4523375903229602674U, (uint64_t)2028602258037385622U, (uint64_t)17082839063089388410U,
-    (uint64_t)6103921364634113167U, (uint64_t)17066180888225306102U,
-    (uint64_t)11395680486707876195U, (uint64_t)10952892272443345484U,
-    (uint64_t)8792831960605859401U, (uint64_t)14194485427742325139U,
-    (uint64_t)15146020821144305250U, (uint64_t)1654766014957123343U, (uint64_t)7955526243090948551U,
-    (uint64_t)3989277566080493308U, (uint64_t)12229385116397931231U,
-    (uint64_t)13430548930727025562U, (uint64_t)3434892688179800602U, (uint64_t)8431998794645622027U,
-    (uint64_t)12132530981596299272U, (uint64_t)2289461608863966999U,
-    (uint64_t)18345870950201487179U, (uint64_t)13517947207801901576U,
-    (uint64_t)5213113244172561159U, (uint64_t)17632986594098340879U, (uint64_t)4405251818133148856U,
-    (uint64_t)11783009269435447793U, (uint64_t)9332138983770046035U,
-    (uint64_t)12863411548922539505U, (uint64_t)3717030292816178224U,
-    (uint64_t)10026078446427137374U, (uint64_t)11167295326594317220U,
-    (uint64_t)12425328773141588668U, (uint64_t)5760335125172049352U, (uint64_t)9016843701117277863U,
-    (uint64_t)5657892835694680172U, (uint64_t)11025130589305387464U, (uint64_t)1368484957977406173U,
-    (uint64_t)17361351345281258834U, (uint64_t)1907113641956152700U,
-    (uint64_t)16439233413531427752U, (uint64_t)5893322296986588932U,
-    (uint64_t)14000206906171746627U, (uint64_t)14979266987545792900U,
-    (uint64_t)6926291766898221120U, (uint64_t)7162023296083360752U, (uint64_t)14762747553625382529U,
-    (uint64_t)12610831658612406849U, (uint64_t)10462926899548715515U,
-    (uint64_t)4794017723140405312U, (uint64_t)5234438200490163319U, (uint64_t)8019519110339576320U,
-    (uint64_t)7194604241290530100U, (uint64_t)12626770134810813246U,
-    (uint64_t)10793074474236419890U, (uint64_t)11323224347913978783U,
-    (uint64_t)16831128015895380245U, (uint64_t)18323094195124693378U,
-    (uint64_t)2361097165281567692U, (uint64_t)15755578675014279498U,
-    (uint64_t)14289876470325854580U, (uint64_t)12856787656093616839U,
-    (uint64_t)3578928531243900594U, (uint64_t)3847532758790503699U, (uint64_t)8377953190224748743U,
-    (uint64_t)3314546646092744596U, (uint64_t)800810188859334358U, (uint64_t)4626344124229343596U,
-    (uint64_t)6620381605850876621U, (uint64_t)11422073570955989527U,
-    (uint64_t)12676813626484814469U, (uint64_t)16725029886764122240U,
-    (uint64_t)16648497372773830008U, (uint64_t)9135702594931291048U,
-    (uint64_t)16080949688826680333U, (uint64_t)11528096561346602947U,
-    (uint64_t)2632498067099740984U, (uint64_t)11583842699108800714U, (uint64_t)8378404864573610526U,
-    (uint64_t)1076560261627788534U, (uint64_t)13836015994325032828U,
-    (uint64_t)11234295937817067909U, (uint64_t)5893659808396722708U,
-    (uint64_t)11277421142886984364U, (uint64_t)8968549037166726491U,
-    (uint64_t)14841374331394032822U, (uint64_t)9967344773947889341U, (uint64_t)8799244393578496085U,
-    (uint64_t)5094686877301601410U, (uint64_t)8780316747074726862U, (uint64_t)9119697306829835718U,
-    (uint64_t)15381243327921855368U, (uint64_t)2686250164449435196U,
-    (uint64_t)16466917280442198358U, (uint64_t)13791704489163125216U,
-    (uint64_t)16955859337117924272U, (uint64_t)17112836394923783642U,
-    (uint64_t)4639176427338618063U, (uint64_t)16770029310141094964U,
-    (uint64_t)11049953922966416185U, (uint64_t)12012669590884098968U,
-    (uint64_t)4859326885929417214U, (uint64_t)896380084392586061U, (uint64_t)7153028362977034008U,
-    (uint64_t)10540021163316263301U, (uint64_t)9318277998512936585U,
-    (uint64_t)18344496977694796523U, (uint64_t)11374737400567645494U,
-    (uint64_t)17158800051138212954U, (uint64_t)18343197867863253153U,
-    (uint64_t)18204799297967861226U, (uint64_t)15798973531606348828U,
-    (uint64_t)9870158263408310459U, (uint64_t)17578869832774612627U, (uint64_t)8395748875822696932U,
-    (uint64_t)15310679007370670872U, (uint64_t)11205576736030808860U,
-    (uint64_t)10123429210002838967U, (uint64_t)5910544144088393959U,
-    (uint64_t)14016615653353687369U, (uint64_t)11191676704772957822U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 14619254753077084366ULL, 13913835116514008593ULL,
+    15060744674088488145ULL, 17668414598203068685ULL, 10761169236902342334ULL,
+    15467027479157446221ULL, 14989185522423469618ULL, 14354539272510107003ULL,
+    14298211796392133693ULL, 13270323784253711450ULL, 13380964971965046957ULL,
+    8686204248456909699ULL, 17434630286744937066ULL, 1355903775279084720ULL, 7554695053550308662ULL,
+    11354971222741863570ULL, 564601613420749879ULL, 8466325837259054896ULL, 10752965181772434263ULL,
+    11405876547368426319ULL, 13791894568738930940ULL, 8230587134406354675ULL,
+    12415514098722758608ULL, 18414183046995786744ULL, 15508000368227372870ULL,
+    5781062464627999307ULL, 15339429052219195590ULL, 16038703753810741903ULL,
+    9587718938298980714ULL, 4822658817952386407ULL, 1376351024833260660ULL, 1120174910554766702ULL,
+    1730170933262569274ULL, 5187428548444533500ULL, 16242053503368957131ULL, 3036811119519868279ULL,
+    1760267587958926638ULL, 170244572981065185ULL, 8063080791967388171ULL, 4824892826607692737ULL,
+    16286391083472040552ULL, 11945158615253358747ULL, 14096887760410224200ULL,
+    1613720831904557039ULL, 14316966673761197523ULL, 17411006201485445341ULL,
+    8112301506943158801ULL, 2069889233927989984ULL, 10082848378277483927ULL, 3609691194454404430ULL,
+    6110437205371933689ULL, 9769135977342231601ULL, 11977962151783386478ULL,
+    18088718692559983573ULL, 11741637975753055ULL, 11110390325701582190ULL, 1341402251566067019ULL,
+    3028229550849726478ULL, 10438984083997451310ULL, 12730851885100145709ULL,
+    11524169532089894189ULL, 4523375903229602674ULL, 2028602258037385622ULL,
+    17082839063089388410ULL, 6103921364634113167ULL, 17066180888225306102ULL,
+    11395680486707876195ULL, 10952892272443345484ULL, 8792831960605859401ULL,
+    14194485427742325139ULL, 15146020821144305250ULL, 1654766014957123343ULL,
+    7955526243090948551ULL, 3989277566080493308ULL, 12229385116397931231ULL,
+    13430548930727025562ULL, 3434892688179800602ULL, 8431998794645622027ULL,
+    12132530981596299272ULL, 2289461608863966999ULL, 18345870950201487179ULL,
+    13517947207801901576ULL, 5213113244172561159ULL, 17632986594098340879ULL,
+    4405251818133148856ULL, 11783009269435447793ULL, 9332138983770046035ULL,
+    12863411548922539505ULL, 3717030292816178224ULL, 10026078446427137374ULL,
+    11167295326594317220ULL, 12425328773141588668ULL, 5760335125172049352ULL,
+    9016843701117277863ULL, 5657892835694680172ULL, 11025130589305387464ULL, 1368484957977406173ULL,
+    17361351345281258834ULL, 1907113641956152700ULL, 16439233413531427752ULL,
+    5893322296986588932ULL, 14000206906171746627ULL, 14979266987545792900ULL,
+    6926291766898221120ULL, 7162023296083360752ULL, 14762747553625382529ULL,
+    12610831658612406849ULL, 10462926899548715515ULL, 4794017723140405312ULL,
+    5234438200490163319ULL, 8019519110339576320ULL, 7194604241290530100ULL, 12626770134810813246ULL,
+    10793074474236419890ULL, 11323224347913978783ULL, 16831128015895380245ULL,
+    18323094195124693378ULL, 2361097165281567692ULL, 15755578675014279498ULL,
+    14289876470325854580ULL, 12856787656093616839ULL, 3578928531243900594ULL,
+    3847532758790503699ULL, 8377953190224748743ULL, 3314546646092744596ULL, 800810188859334358ULL,
+    4626344124229343596ULL, 6620381605850876621ULL, 11422073570955989527ULL,
+    12676813626484814469ULL, 16725029886764122240ULL, 16648497372773830008ULL,
+    9135702594931291048ULL, 16080949688826680333ULL, 11528096561346602947ULL,
+    2632498067099740984ULL, 11583842699108800714ULL, 8378404864573610526ULL, 1076560261627788534ULL,
+    13836015994325032828ULL, 11234295937817067909ULL, 5893659808396722708ULL,
+    11277421142886984364ULL, 8968549037166726491ULL, 14841374331394032822ULL,
+    9967344773947889341ULL, 8799244393578496085ULL, 5094686877301601410ULL, 8780316747074726862ULL,
+    9119697306829835718ULL, 15381243327921855368ULL, 2686250164449435196ULL,
+    16466917280442198358ULL, 13791704489163125216ULL, 16955859337117924272ULL,
+    17112836394923783642ULL, 4639176427338618063ULL, 16770029310141094964ULL,
+    11049953922966416185ULL, 12012669590884098968ULL, 4859326885929417214ULL, 896380084392586061ULL,
+    7153028362977034008ULL, 10540021163316263301ULL, 9318277998512936585ULL,
+    18344496977694796523ULL, 11374737400567645494ULL, 17158800051138212954ULL,
+    18343197867863253153ULL, 18204799297967861226ULL, 15798973531606348828ULL,
+    9870158263408310459ULL, 17578869832774612627ULL, 8395748875822696932ULL,
+    15310679007370670872ULL, 11205576736030808860ULL, 10123429210002838967ULL,
+    5910544144088393959ULL, 14016615653353687369ULL, 11191676704772957822ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)7870395003430845958U,
-    (uint64_t)18001862936410067720U, (uint64_t)8006461232116967215U, (uint64_t)5921313779532424762U,
-    (uint64_t)10702113371959864307U, (uint64_t)8070517410642379879U, (uint64_t)7139806720777708306U,
-    (uint64_t)8253938546650739833U, (uint64_t)17490482834545705718U, (uint64_t)1065249776797037500U,
-    (uint64_t)5018258455937968775U, (uint64_t)14100621120178668337U, (uint64_t)8392845221328116213U,
-    (uint64_t)14630296398338540788U, (uint64_t)4268947906723414372U, (uint64_t)9231207002243517909U,
-    (uint64_t)14261219637616504262U, (uint64_t)7786881626982345356U,
-    (uint64_t)11412720751765882139U, (uint64_t)14119585051365330009U,
-    (uint64_t)15281626286521302128U, (uint64_t)6350171933454266732U,
-    (uint64_t)16559468304937127866U, (uint64_t)13200760478271693417U,
-    (uint64_t)6733381546280350776U, (uint64_t)3801404890075189193U, (uint64_t)2741036364686993903U,
-    (uint64_t)3218612940540174008U, (uint64_t)10894914335165419505U,
-    (uint64_t)11862941430149998362U, (uint64_t)4223151729402839584U, (uint64_t)2913215088487087887U,
-    (uint64_t)14562168920104952953U, (uint64_t)2170089393468287453U,
-    (uint64_t)10520900655016579352U, (uint64_t)7040362608949989273U, (uint64_t)8376510559381705307U,
-    (uint64_t)9142237200448131532U, (uint64_t)5696859948123854080U, (uint64_t)925422306716081180U,
-    (uint64_t)11155545953469186421U, (uint64_t)1888208646862572812U,
-    (uint64_t)11151095998248845721U, (uint64_t)15793503271680275267U,
-    (uint64_t)7729877044494854851U, (uint64_t)6235134673193032913U, (uint64_t)7364280682182401564U,
-    (uint64_t)5479679373325519985U, (uint64_t)17966037684582301763U,
-    (uint64_t)14140891609330279185U, (uint64_t)5814744449740463867U, (uint64_t)5652588426712591652U,
-    (uint64_t)774745682988690912U, (uint64_t)13228255573220500373U, (uint64_t)11949122068786859397U,
-    (uint64_t)8021166392900770376U, (uint64_t)7994323710948720063U, (uint64_t)9924618472877849977U,
-    (uint64_t)17618517523141194266U, (uint64_t)2750424097794401714U,
-    (uint64_t)15481749570715253207U, (uint64_t)14646964509921760497U,
-    (uint64_t)1037442848094301355U, (uint64_t)6295995947389299132U, (uint64_t)16915049722317579514U,
-    (uint64_t)10493877400992990313U, (uint64_t)18391008753060553521U, (uint64_t)483942209623707598U,
-    (uint64_t)2017775662838016613U, (uint64_t)5933251998459363553U, (uint64_t)11789135019970707407U,
-    (uint64_t)5484123723153268336U, (uint64_t)13246954648848484954U, (uint64_t)4774374393926023505U,
-    (uint64_t)14863995618704457336U, (uint64_t)13220153167104973625U,
-    (uint64_t)5988445485312390826U, (uint64_t)17580359464028944682U, (uint64_t)7297100131969874771U,
-    (uint64_t)379931507867989375U, (uint64_t)10927113096513421444U, (uint64_t)17688881974428340857U,
-    (uint64_t)4259872578781463333U, (uint64_t)8573076295966784472U, (uint64_t)16389829450727275032U,
-    (uint64_t)1667243868963568259U, (uint64_t)17730726848925960919U,
-    (uint64_t)11408899874569778008U, (uint64_t)3576527582023272268U,
-    (uint64_t)16492920640224231656U, (uint64_t)7906130545972460130U,
-    (uint64_t)13878604278207681266U, (uint64_t)41446695125652041U, (uint64_t)8891615271337333503U,
-    (uint64_t)2594537723613594470U, (uint64_t)7699579176995770924U, (uint64_t)147458463055730655U,
-    (uint64_t)12120406862739088406U, (uint64_t)12044892493010567063U,
-    (uint64_t)8554076749615475136U, (uint64_t)1005097692260929999U, (uint64_t)2687202654471188715U,
-    (uint64_t)9457588752176879209U, (uint64_t)17472884880062444019U, (uint64_t)9792097892056020166U,
-    (uint64_t)2525246678512797150U, (uint64_t)15958903035313115662U,
-    (uint64_t)11336038170342247032U, (uint64_t)11560342382835141123U,
-    (uint64_t)6212009033479929024U, (uint64_t)8214308203775021229U, (uint64_t)8475469210070503698U,
-    (uint64_t)13287024123485719563U, (uint64_t)12956951963817520723U,
-    (uint64_t)10693035819908470465U, (uint64_t)11375478788224786725U,
-    (uint64_t)16934625208487120398U, (uint64_t)10094585729115874495U,
-    (uint64_t)2763884524395905776U, (uint64_t)13535890148969964883U,
-    (uint64_t)13514657411765064358U, (uint64_t)9903074440788027562U,
-    (uint64_t)17324720726421199990U, (uint64_t)2273931039117368789U, (uint64_t)3442641041506157854U,
-    (uint64_t)1119853641236409612U, (uint64_t)12037070344296077989U, (uint64_t)581736433335671746U,
-    (uint64_t)6019150647054369174U, (uint64_t)14864096138068789375U, (uint64_t)6652995210998318662U,
-    (uint64_t)12773883697029175304U, (uint64_t)12751275631451845119U,
-    (uint64_t)11449095003038250478U, (uint64_t)1025805267334366480U, (uint64_t)2764432500300815015U,
-    (uint64_t)18274564429002844381U, (uint64_t)10445634195592600351U,
-    (uint64_t)11814099592837202735U, (uint64_t)5006796893679120289U, (uint64_t)6908397253997261914U,
-    (uint64_t)13266696965302879279U, (uint64_t)7768715053015037430U, (uint64_t)3569923738654785686U,
-    (uint64_t)5844853453464857549U, (uint64_t)1837340805629559110U, (uint64_t)1034657624388283114U,
-    (uint64_t)711244516069456460U, (uint64_t)12519286026957934814U, (uint64_t)2613464944620837619U,
-    (uint64_t)10003023321338286213U, (uint64_t)7291332092642881376U, (uint64_t)9832199564117004897U,
-    (uint64_t)3280736694860799890U, (uint64_t)6416452202849179874U, (uint64_t)7326961381798642069U,
-    (uint64_t)8435688798040635029U, (uint64_t)16630141263910982958U,
-    (uint64_t)17222635514422533318U, (uint64_t)9482787389178881499U, (uint64_t)836561194658263905U,
-    (uint64_t)3405319043337616649U, (uint64_t)2786146577568026518U, (uint64_t)7625483685691626321U,
-    (uint64_t)6728084875304656716U, (uint64_t)1140997959232544268U, (uint64_t)12847384827606303792U,
-    (uint64_t)1719121337754572070U, (uint64_t)12863589482936438532U, (uint64_t)3880712899640530862U,
-    (uint64_t)2748456882813671564U, (uint64_t)4775988900044623019U, (uint64_t)8937847374382191162U,
-    (uint64_t)3767367347172252295U, (uint64_t)13468672401049388646U,
-    (uint64_t)14359032216842397576U, (uint64_t)2002555958685443975U,
-    (uint64_t)16488678606651526810U, (uint64_t)11826135409597474760U,
-    (uint64_t)15296495673182508601U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 7870395003430845958ULL, 18001862936410067720ULL, 8006461232116967215ULL,
+    5921313779532424762ULL, 10702113371959864307ULL, 8070517410642379879ULL, 7139806720777708306ULL,
+    8253938546650739833ULL, 17490482834545705718ULL, 1065249776797037500ULL, 5018258455937968775ULL,
+    14100621120178668337ULL, 8392845221328116213ULL, 14630296398338540788ULL,
+    4268947906723414372ULL, 9231207002243517909ULL, 14261219637616504262ULL, 7786881626982345356ULL,
+    11412720751765882139ULL, 14119585051365330009ULL, 15281626286521302128ULL,
+    6350171933454266732ULL, 16559468304937127866ULL, 13200760478271693417ULL,
+    6733381546280350776ULL, 3801404890075189193ULL, 2741036364686993903ULL, 3218612940540174008ULL,
+    10894914335165419505ULL, 11862941430149998362ULL, 4223151729402839584ULL,
+    2913215088487087887ULL, 14562168920104952953ULL, 2170089393468287453ULL,
+    10520900655016579352ULL, 7040362608949989273ULL, 8376510559381705307ULL, 9142237200448131532ULL,
+    5696859948123854080ULL, 925422306716081180ULL, 11155545953469186421ULL, 1888208646862572812ULL,
+    11151095998248845721ULL, 15793503271680275267ULL, 7729877044494854851ULL,
+    6235134673193032913ULL, 7364280682182401564ULL, 5479679373325519985ULL, 17966037684582301763ULL,
+    14140891609330279185ULL, 5814744449740463867ULL, 5652588426712591652ULL, 774745682988690912ULL,
+    13228255573220500373ULL, 11949122068786859397ULL, 8021166392900770376ULL,
+    7994323710948720063ULL, 9924618472877849977ULL, 17618517523141194266ULL, 2750424097794401714ULL,
+    15481749570715253207ULL, 14646964509921760497ULL, 1037442848094301355ULL,
+    6295995947389299132ULL, 16915049722317579514ULL, 10493877400992990313ULL,
+    18391008753060553521ULL, 483942209623707598ULL, 2017775662838016613ULL, 5933251998459363553ULL,
+    11789135019970707407ULL, 5484123723153268336ULL, 13246954648848484954ULL,
+    4774374393926023505ULL, 14863995618704457336ULL, 13220153167104973625ULL,
+    5988445485312390826ULL, 17580359464028944682ULL, 7297100131969874771ULL, 379931507867989375ULL,
+    10927113096513421444ULL, 17688881974428340857ULL, 4259872578781463333ULL,
+    8573076295966784472ULL, 16389829450727275032ULL, 1667243868963568259ULL,
+    17730726848925960919ULL, 11408899874569778008ULL, 3576527582023272268ULL,
+    16492920640224231656ULL, 7906130545972460130ULL, 13878604278207681266ULL, 41446695125652041ULL,
+    8891615271337333503ULL, 2594537723613594470ULL, 7699579176995770924ULL, 147458463055730655ULL,
+    12120406862739088406ULL, 12044892493010567063ULL, 8554076749615475136ULL,
+    1005097692260929999ULL, 2687202654471188715ULL, 9457588752176879209ULL, 17472884880062444019ULL,
+    9792097892056020166ULL, 2525246678512797150ULL, 15958903035313115662ULL,
+    11336038170342247032ULL, 11560342382835141123ULL, 6212009033479929024ULL,
+    8214308203775021229ULL, 8475469210070503698ULL, 13287024123485719563ULL,
+    12956951963817520723ULL, 10693035819908470465ULL, 11375478788224786725ULL,
+    16934625208487120398ULL, 10094585729115874495ULL, 2763884524395905776ULL,
+    13535890148969964883ULL, 13514657411765064358ULL, 9903074440788027562ULL,
+    17324720726421199990ULL, 2273931039117368789ULL, 3442641041506157854ULL, 1119853641236409612ULL,
+    12037070344296077989ULL, 581736433335671746ULL, 6019150647054369174ULL, 14864096138068789375ULL,
+    6652995210998318662ULL, 12773883697029175304ULL, 12751275631451845119ULL,
+    11449095003038250478ULL, 1025805267334366480ULL, 2764432500300815015ULL,
+    18274564429002844381ULL, 10445634195592600351ULL, 11814099592837202735ULL,
+    5006796893679120289ULL, 6908397253997261914ULL, 13266696965302879279ULL, 7768715053015037430ULL,
+    3569923738654785686ULL, 5844853453464857549ULL, 1837340805629559110ULL, 1034657624388283114ULL,
+    711244516069456460ULL, 12519286026957934814ULL, 2613464944620837619ULL, 10003023321338286213ULL,
+    7291332092642881376ULL, 9832199564117004897ULL, 3280736694860799890ULL, 6416452202849179874ULL,
+    7326961381798642069ULL, 8435688798040635029ULL, 16630141263910982958ULL,
+    17222635514422533318ULL, 9482787389178881499ULL, 836561194658263905ULL, 3405319043337616649ULL,
+    2786146577568026518ULL, 7625483685691626321ULL, 6728084875304656716ULL, 1140997959232544268ULL,
+    12847384827606303792ULL, 1719121337754572070ULL, 12863589482936438532ULL,
+    3880712899640530862ULL, 2748456882813671564ULL, 4775988900044623019ULL, 8937847374382191162ULL,
+    3767367347172252295ULL, 13468672401049388646ULL, 14359032216842397576ULL,
+    2002555958685443975ULL, 16488678606651526810ULL, 11826135409597474760ULL,
+    15296495673182508601ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_basepoint_table_w5[384U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)8784043285714375740U,
-    (uint64_t)8483257759279461889U, (uint64_t)8789745728267363600U, (uint64_t)1770019616739251654U,
-    (uint64_t)15992936863339206154U, (uint64_t)10037038012062884956U,
-    (uint64_t)15197544864945402661U, (uint64_t)9615747158586711429U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)10634854829044225757U, (uint64_t)351552716085025155U, (uint64_t)10645315080955407736U,
-    (uint64_t)3609262091244858135U, (uint64_t)15760741698986874125U,
-    (uint64_t)14936374388219697827U, (uint64_t)15751360096993017895U,
-    (uint64_t)18012233706239762398U, (uint64_t)1993877568177495041U,
-    (uint64_t)10345888787846536528U, (uint64_t)7746511691117935375U,
-    (uint64_t)14517043990409914413U, (uint64_t)14122549297570634151U,
-    (uint64_t)16934610359517083771U, (uint64_t)5724511325497097418U, (uint64_t)8983432969107448705U,
-    (uint64_t)2687429970334080245U, (uint64_t)16525396802810050288U, (uint64_t)7602596488871585854U,
-    (uint64_t)4813919589149203084U, (uint64_t)7680395813780804519U, (uint64_t)6687709583048023590U,
-    (uint64_t)18086445169104142027U, (uint64_t)9637814708330203929U,
-    (uint64_t)14785108459960679090U, (uint64_t)3838023279095023581U, (uint64_t)3555615526157830307U,
-    (uint64_t)5177066488380472871U, (uint64_t)18218186719108038403U,
-    (uint64_t)16281556341699656105U, (uint64_t)1524227924561461191U, (uint64_t)4148060517641909597U,
-    (uint64_t)2858290374115363433U, (uint64_t)8942772026334130620U, (uint64_t)3034451298319885113U,
-    (uint64_t)8447866036736640940U, (uint64_t)11204933433076256578U,
-    (uint64_t)18333595740249588297U, (uint64_t)8259597024804538246U, (uint64_t)9539734295777539786U,
-    (uint64_t)9797290423046626413U, (uint64_t)5777303437849646537U, (uint64_t)8739356909899132020U,
-    (uint64_t)14815960973766782158U, (uint64_t)15286581798204509801U,
-    (uint64_t)17597362577777019682U, (uint64_t)13259283710820519742U,
-    (uint64_t)10501322996899164670U, (uint64_t)1221138904338319642U,
-    (uint64_t)14586685489551951885U, (uint64_t)895326705426031212U, (uint64_t)14398171728560617847U,
-    (uint64_t)9592550823745097391U, (uint64_t)17240998489162206026U, (uint64_t)8085479283308189196U,
-    (uint64_t)14844657737893882826U, (uint64_t)15923425394150618234U,
-    (uint64_t)2997808084773249525U, (uint64_t)494323555453660587U, (uint64_t)1215695327517794764U,
-    (uint64_t)9476207381098391690U, (uint64_t)7480789678419122995U, (uint64_t)15212230329321082489U,
-    (uint64_t)436189395349576388U, (uint64_t)17377474396456660834U, (uint64_t)15237013929655017939U,
-    (uint64_t)11444428846883781676U, (uint64_t)5112749694521428575U, (uint64_t)950829367509872073U,
-    (uint64_t)17665036182057559519U, (uint64_t)17205133339690002313U,
-    (uint64_t)16233765170251334549U, (uint64_t)10122775683257972591U,
-    (uint64_t)3352514236455632420U, (uint64_t)9143148522359954691U, (uint64_t)601191684005658860U,
-    (uint64_t)13398772186646349998U, (uint64_t)15512696600132928431U,
-    (uint64_t)9128416073728948653U, (uint64_t)11233051033546138578U, (uint64_t)6769345682610122833U,
-    (uint64_t)10823233224575054288U, (uint64_t)9997725227559980175U, (uint64_t)6733425642852897415U,
-    (uint64_t)16302206918151466066U, (uint64_t)1669330822143265921U, (uint64_t)2661645605036546002U,
-    (uint64_t)17182558479745802165U, (uint64_t)1165082692376932040U, (uint64_t)9470595929011488359U,
-    (uint64_t)6142147329285324932U, (uint64_t)4829075085998111287U, (uint64_t)10231370681107338930U,
-    (uint64_t)9591876895322495239U, (uint64_t)10316468561384076618U,
-    (uint64_t)11592503647238064235U, (uint64_t)13395813606055179632U, (uint64_t)511127033980815508U,
-    (uint64_t)12434976573147649880U, (uint64_t)3425094795384359127U, (uint64_t)6816971736303023445U,
-    (uint64_t)15444670609021139344U, (uint64_t)9464349818322082360U,
-    (uint64_t)16178216413042376883U, (uint64_t)9595540370774317348U, (uint64_t)7229365182662875710U,
-    (uint64_t)4601177649460012843U, (uint64_t)5455046447382487090U, (uint64_t)10854066421606187521U,
-    (uint64_t)15913416821879788071U, (uint64_t)2297365362023460173U, (uint64_t)2603252216454941350U,
-    (uint64_t)6768791943870490934U, (uint64_t)15705936687122754810U, (uint64_t)9537096567546600694U,
-    (uint64_t)17580538144855035062U, (uint64_t)4496542856965746638U, (uint64_t)8444341625922124942U,
-    (uint64_t)12191263903636183168U, (uint64_t)17427332907535974165U,
-    (uint64_t)14307569739254103736U, (uint64_t)13900598742063266169U,
-    (uint64_t)7176996424355977650U, (uint64_t)5709008170379717479U, (uint64_t)14471312052264549092U,
-    (uint64_t)1464519909491759867U, (uint64_t)3328154641049602121U, (uint64_t)13020349337171136774U,
-    (uint64_t)2772166279972051938U, (uint64_t)10854476939425975292U, (uint64_t)1967189930534630940U,
-    (uint64_t)2802919076529341959U, (uint64_t)14792226094833519208U,
-    (uint64_t)14675640928566522177U, (uint64_t)14838974364643800837U,
-    (uint64_t)17631460696099549980U, (uint64_t)17434186275364935469U,
-    (uint64_t)2665648200587705473U, (uint64_t)13202122464492564051U, (uint64_t)7576287350918073341U,
-    (uint64_t)2272206013910186424U, (uint64_t)14558761641743937843U, (uint64_t)5675729149929979729U,
-    (uint64_t)9043135187561613166U, (uint64_t)11750149293830589225U, (uint64_t)740555197954307911U,
-    (uint64_t)9871738005087190699U, (uint64_t)17178667634283502053U,
-    (uint64_t)18046255991533013265U, (uint64_t)4458222096988430430U, (uint64_t)8452427758526311627U,
-    (uint64_t)13825286929656615266U, (uint64_t)13956286357198391218U,
-    (uint64_t)15875692916799995079U, (uint64_t)10634895319157013920U,
-    (uint64_t)13230116118036304207U, (uint64_t)8795317393614625606U, (uint64_t)7001710806858862020U,
-    (uint64_t)7949746088586183478U, (uint64_t)14677556044923602317U,
-    (uint64_t)11184023437485843904U, (uint64_t)11215864722023085094U,
-    (uint64_t)6444464081471519014U, (uint64_t)1706241174022415217U, (uint64_t)8243975633057550613U,
-    (uint64_t)15502902453836085864U, (uint64_t)3799182188594003953U, (uint64_t)3538840175098724094U,
-    (uint64_t)13240193491554624643U, (uint64_t)12365034249541329920U,
-    (uint64_t)2924326828590977357U, (uint64_t)5687195797140589099U, (uint64_t)16880427227292834531U,
-    (uint64_t)9691471435758991112U, (uint64_t)16642385273732487288U,
-    (uint64_t)12173806747523009914U, (uint64_t)13142722756877876849U,
-    (uint64_t)8370377548305121979U, (uint64_t)17988526053752025426U, (uint64_t)4818750752684100334U,
-    (uint64_t)5669241919350361655U, (uint64_t)4964810303238518540U, (uint64_t)16709712747671533191U,
-    (uint64_t)4461414404267448242U, (uint64_t)3971798785139504238U, (uint64_t)6276818948740422136U,
-    (uint64_t)1426735892164275762U, (uint64_t)7943622674892418919U, (uint64_t)9864274225563929680U,
-    (uint64_t)57815533745003233U, (uint64_t)10893588105168960233U, (uint64_t)15739162732907069535U,
-    (uint64_t)3923866849462073470U, (uint64_t)12279826158399226875U, (uint64_t)1533015761334846582U,
-    (uint64_t)15860156818568437510U, (uint64_t)8252625373831297988U, (uint64_t)9666953804812706358U,
-    (uint64_t)8767785238646914634U, (uint64_t)14382179044941403551U,
-    (uint64_t)10401039907264254245U, (uint64_t)8584860003763157350U, (uint64_t)3120462679504470266U,
-    (uint64_t)8670255778748340069U, (uint64_t)5313789577940369984U, (uint64_t)16977072364454789224U,
-    (uint64_t)12199578693972188324U, (uint64_t)18211098771672599237U,
-    (uint64_t)12868831556008795030U, (uint64_t)5310155061431048194U,
-    (uint64_t)18114153238435112606U, (uint64_t)14482365809278304512U,
-    (uint64_t)12520721662723001511U, (uint64_t)405943624021143002U, (uint64_t)8146944101507657423U,
-    (uint64_t)181739317780393495U, (uint64_t)81743892273670099U, (uint64_t)14759561962550473930U,
-    (uint64_t)4592623849546992939U, (uint64_t)6916440441743449719U, (uint64_t)1304610503530809833U,
-    (uint64_t)5464930909232486441U, (uint64_t)15414883617496224671U, (uint64_t)8129283345256790U,
-    (uint64_t)18294252198413739489U, (uint64_t)17394115281884857288U,
-    (uint64_t)7808348415224731235U, (uint64_t)13195566655747230608U, (uint64_t)8568194219353949094U,
-    (uint64_t)15329813048672122440U, (uint64_t)9604275495885785744U, (uint64_t)1577712551205219835U,
-    (uint64_t)15964209008022052790U, (uint64_t)15087297920782098160U,
-    (uint64_t)3946031512438511898U, (uint64_t)10050061168984440631U,
-    (uint64_t)11382452014533138316U, (uint64_t)6313670788911952792U,
-    (uint64_t)12015989229696164014U, (uint64_t)5946702628076168852U, (uint64_t)5219995658774362841U,
-    (uint64_t)12230141881068377972U, (uint64_t)12361195202673441956U,
-    (uint64_t)4732862275653856711U, (uint64_t)17221430380805252370U,
-    (uint64_t)15397525953897375810U, (uint64_t)16557437297239563045U,
-    (uint64_t)10101683801868971351U, (uint64_t)1402611372245592868U, (uint64_t)1931806383735563658U,
-    (uint64_t)10991705207471512479U, (uint64_t)861333583207471392U, (uint64_t)15207766844626322355U,
-    (uint64_t)9224628129811432393U, (uint64_t)3497069567089055613U, (uint64_t)11956632757898590316U,
-    (uint64_t)8733729372586312960U, (uint64_t)18091521051714930927U, (uint64_t)77582787724373283U,
-    (uint64_t)9922437373519669237U, (uint64_t)3079321456325704615U, (uint64_t)12171198408512478457U,
-    (uint64_t)17179130884012147596U, (uint64_t)6839115479620367181U, (uint64_t)4421032569964105406U,
-    (uint64_t)10353331468657256053U, (uint64_t)17400988720335968824U,
-    (uint64_t)17138855889417480540U, (uint64_t)4507980080381370611U,
-    (uint64_t)10703175719793781886U, (uint64_t)12598516658725890426U,
-    (uint64_t)8353463412173898932U, (uint64_t)17703029389228422404U, (uint64_t)9313111267107226233U,
-    (uint64_t)5441322942995154196U, (uint64_t)8952817660034465484U, (uint64_t)17571113341183703118U,
-    (uint64_t)7375087953801067019U, (uint64_t)13381466302076453648U, (uint64_t)3218165271423914596U,
-    (uint64_t)16956372157249382685U, (uint64_t)509080090049418841U, (uint64_t)13374233893294084913U,
-    (uint64_t)2988537624204297086U, (uint64_t)4979195832939384620U, (uint64_t)3803931594068976394U,
-    (uint64_t)10731535883829627646U, (uint64_t)12954845047607194278U,
-    (uint64_t)10494298062560667399U, (uint64_t)4967351022190213065U,
-    (uint64_t)13391917938145756456U, (uint64_t)951370484866918160U, (uint64_t)13531334179067685307U,
-    (uint64_t)12868421357919390599U, (uint64_t)15918857042998130258U,
-    (uint64_t)17769743831936974016U, (uint64_t)7137921979260368809U,
-    (uint64_t)12461369180685892062U, (uint64_t)827476514081935199U, (uint64_t)15107282134224767230U,
-    (uint64_t)10084765752802805748U, (uint64_t)3303739059392464407U,
-    (uint64_t)17859532612136591428U, (uint64_t)10949414770405040164U,
-    (uint64_t)12838613589371008785U, (uint64_t)5554397169231540728U,
-    (uint64_t)18375114572169624408U, (uint64_t)15649286703242390139U,
-    (uint64_t)2957281557463706877U, (uint64_t)14000350446219393213U,
-    (uint64_t)14355199721749620351U, (uint64_t)2730856240099299695U,
-    (uint64_t)17528131000714705752U, (uint64_t)2537498525883536360U, (uint64_t)6121058967084509393U,
-    (uint64_t)16897667060435514221U, (uint64_t)12367869599571112440U,
-    (uint64_t)3388831797050807508U, (uint64_t)16791449724090982798U, (uint64_t)2673426123453294928U,
-    (uint64_t)11369313542384405846U, (uint64_t)15641960333586432634U,
-    (uint64_t)15080962589658958379U, (uint64_t)7747943772340226569U, (uint64_t)8075023376199159152U,
-    (uint64_t)8485093027378306528U, (uint64_t)13503706844122243648U, (uint64_t)8401961362938086226U,
-    (uint64_t)8125426002124226402U, (uint64_t)9005399361407785203U, (uint64_t)6847968030066906634U,
-    (uint64_t)11934937736309295197U, (uint64_t)5116750888594772351U, (uint64_t)2817039227179245227U,
-    (uint64_t)17724206901239332980U, (uint64_t)4985702708254058578U, (uint64_t)5786345435756642871U,
-    (uint64_t)17772527414940936938U, (uint64_t)1201320251272957006U,
-    (uint64_t)15787430120324348129U, (uint64_t)6305488781359965661U,
-    (uint64_t)12423900845502858433U, (uint64_t)17485949424202277720U,
-    (uint64_t)2062237315546855852U, (uint64_t)10353639467860902375U, (uint64_t)2315398490451287299U,
-    (uint64_t)15394572894814882621U, (uint64_t)232866113801165640U, (uint64_t)7413443736109338926U,
-    (uint64_t)902719806551551191U, (uint64_t)16568853118619045174U, (uint64_t)14202214862428279177U,
-    (uint64_t)11719595395278861192U, (uint64_t)5890053236389907647U, (uint64_t)9996196494965833627U,
-    (uint64_t)12967056942364782577U, (uint64_t)9034128755157395787U,
-    (uint64_t)17898204904710512655U, (uint64_t)8229373445062993977U,
-    (uint64_t)13580036169519833644U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 8784043285714375740ULL, 8483257759279461889ULL, 8789745728267363600ULL,
+    1770019616739251654ULL, 15992936863339206154ULL, 10037038012062884956ULL,
+    15197544864945402661ULL, 9615747158586711429ULL, 1ULL, 18446744069414584320ULL,
+    18446744073709551615ULL, 4294967294ULL, 10634854829044225757ULL, 351552716085025155ULL,
+    10645315080955407736ULL, 3609262091244858135ULL, 15760741698986874125ULL,
+    14936374388219697827ULL, 15751360096993017895ULL, 18012233706239762398ULL,
+    1993877568177495041ULL, 10345888787846536528ULL, 7746511691117935375ULL,
+    14517043990409914413ULL, 14122549297570634151ULL, 16934610359517083771ULL,
+    5724511325497097418ULL, 8983432969107448705ULL, 2687429970334080245ULL, 16525396802810050288ULL,
+    7602596488871585854ULL, 4813919589149203084ULL, 7680395813780804519ULL, 6687709583048023590ULL,
+    18086445169104142027ULL, 9637814708330203929ULL, 14785108459960679090ULL,
+    3838023279095023581ULL, 3555615526157830307ULL, 5177066488380472871ULL, 18218186719108038403ULL,
+    16281556341699656105ULL, 1524227924561461191ULL, 4148060517641909597ULL, 2858290374115363433ULL,
+    8942772026334130620ULL, 3034451298319885113ULL, 8447866036736640940ULL, 11204933433076256578ULL,
+    18333595740249588297ULL, 8259597024804538246ULL, 9539734295777539786ULL, 9797290423046626413ULL,
+    5777303437849646537ULL, 8739356909899132020ULL, 14815960973766782158ULL,
+    15286581798204509801ULL, 17597362577777019682ULL, 13259283710820519742ULL,
+    10501322996899164670ULL, 1221138904338319642ULL, 14586685489551951885ULL, 895326705426031212ULL,
+    14398171728560617847ULL, 9592550823745097391ULL, 17240998489162206026ULL,
+    8085479283308189196ULL, 14844657737893882826ULL, 15923425394150618234ULL,
+    2997808084773249525ULL, 494323555453660587ULL, 1215695327517794764ULL, 9476207381098391690ULL,
+    7480789678419122995ULL, 15212230329321082489ULL, 436189395349576388ULL, 17377474396456660834ULL,
+    15237013929655017939ULL, 11444428846883781676ULL, 5112749694521428575ULL, 950829367509872073ULL,
+    17665036182057559519ULL, 17205133339690002313ULL, 16233765170251334549ULL,
+    10122775683257972591ULL, 3352514236455632420ULL, 9143148522359954691ULL, 601191684005658860ULL,
+    13398772186646349998ULL, 15512696600132928431ULL, 9128416073728948653ULL,
+    11233051033546138578ULL, 6769345682610122833ULL, 10823233224575054288ULL,
+    9997725227559980175ULL, 6733425642852897415ULL, 16302206918151466066ULL, 1669330822143265921ULL,
+    2661645605036546002ULL, 17182558479745802165ULL, 1165082692376932040ULL, 9470595929011488359ULL,
+    6142147329285324932ULL, 4829075085998111287ULL, 10231370681107338930ULL, 9591876895322495239ULL,
+    10316468561384076618ULL, 11592503647238064235ULL, 13395813606055179632ULL,
+    511127033980815508ULL, 12434976573147649880ULL, 3425094795384359127ULL, 6816971736303023445ULL,
+    15444670609021139344ULL, 9464349818322082360ULL, 16178216413042376883ULL,
+    9595540370774317348ULL, 7229365182662875710ULL, 4601177649460012843ULL, 5455046447382487090ULL,
+    10854066421606187521ULL, 15913416821879788071ULL, 2297365362023460173ULL,
+    2603252216454941350ULL, 6768791943870490934ULL, 15705936687122754810ULL, 9537096567546600694ULL,
+    17580538144855035062ULL, 4496542856965746638ULL, 8444341625922124942ULL,
+    12191263903636183168ULL, 17427332907535974165ULL, 14307569739254103736ULL,
+    13900598742063266169ULL, 7176996424355977650ULL, 5709008170379717479ULL,
+    14471312052264549092ULL, 1464519909491759867ULL, 3328154641049602121ULL,
+    13020349337171136774ULL, 2772166279972051938ULL, 10854476939425975292ULL,
+    1967189930534630940ULL, 2802919076529341959ULL, 14792226094833519208ULL,
+    14675640928566522177ULL, 14838974364643800837ULL, 17631460696099549980ULL,
+    17434186275364935469ULL, 2665648200587705473ULL, 13202122464492564051ULL,
+    7576287350918073341ULL, 2272206013910186424ULL, 14558761641743937843ULL, 5675729149929979729ULL,
+    9043135187561613166ULL, 11750149293830589225ULL, 740555197954307911ULL, 9871738005087190699ULL,
+    17178667634283502053ULL, 18046255991533013265ULL, 4458222096988430430ULL,
+    8452427758526311627ULL, 13825286929656615266ULL, 13956286357198391218ULL,
+    15875692916799995079ULL, 10634895319157013920ULL, 13230116118036304207ULL,
+    8795317393614625606ULL, 7001710806858862020ULL, 7949746088586183478ULL, 14677556044923602317ULL,
+    11184023437485843904ULL, 11215864722023085094ULL, 6444464081471519014ULL,
+    1706241174022415217ULL, 8243975633057550613ULL, 15502902453836085864ULL, 3799182188594003953ULL,
+    3538840175098724094ULL, 13240193491554624643ULL, 12365034249541329920ULL,
+    2924326828590977357ULL, 5687195797140589099ULL, 16880427227292834531ULL, 9691471435758991112ULL,
+    16642385273732487288ULL, 12173806747523009914ULL, 13142722756877876849ULL,
+    8370377548305121979ULL, 17988526053752025426ULL, 4818750752684100334ULL, 5669241919350361655ULL,
+    4964810303238518540ULL, 16709712747671533191ULL, 4461414404267448242ULL, 3971798785139504238ULL,
+    6276818948740422136ULL, 1426735892164275762ULL, 7943622674892418919ULL, 9864274225563929680ULL,
+    57815533745003233ULL, 10893588105168960233ULL, 15739162732907069535ULL, 3923866849462073470ULL,
+    12279826158399226875ULL, 1533015761334846582ULL, 15860156818568437510ULL,
+    8252625373831297988ULL, 9666953804812706358ULL, 8767785238646914634ULL, 14382179044941403551ULL,
+    10401039907264254245ULL, 8584860003763157350ULL, 3120462679504470266ULL, 8670255778748340069ULL,
+    5313789577940369984ULL, 16977072364454789224ULL, 12199578693972188324ULL,
+    18211098771672599237ULL, 12868831556008795030ULL, 5310155061431048194ULL,
+    18114153238435112606ULL, 14482365809278304512ULL, 12520721662723001511ULL,
+    405943624021143002ULL, 8146944101507657423ULL, 181739317780393495ULL, 81743892273670099ULL,
+    14759561962550473930ULL, 4592623849546992939ULL, 6916440441743449719ULL, 1304610503530809833ULL,
+    5464930909232486441ULL, 15414883617496224671ULL, 8129283345256790ULL, 18294252198413739489ULL,
+    17394115281884857288ULL, 7808348415224731235ULL, 13195566655747230608ULL,
+    8568194219353949094ULL, 15329813048672122440ULL, 9604275495885785744ULL, 1577712551205219835ULL,
+    15964209008022052790ULL, 15087297920782098160ULL, 3946031512438511898ULL,
+    10050061168984440631ULL, 11382452014533138316ULL, 6313670788911952792ULL,
+    12015989229696164014ULL, 5946702628076168852ULL, 5219995658774362841ULL,
+    12230141881068377972ULL, 12361195202673441956ULL, 4732862275653856711ULL,
+    17221430380805252370ULL, 15397525953897375810ULL, 16557437297239563045ULL,
+    10101683801868971351ULL, 1402611372245592868ULL, 1931806383735563658ULL,
+    10991705207471512479ULL, 861333583207471392ULL, 15207766844626322355ULL, 9224628129811432393ULL,
+    3497069567089055613ULL, 11956632757898590316ULL, 8733729372586312960ULL,
+    18091521051714930927ULL, 77582787724373283ULL, 9922437373519669237ULL, 3079321456325704615ULL,
+    12171198408512478457ULL, 17179130884012147596ULL, 6839115479620367181ULL,
+    4421032569964105406ULL, 10353331468657256053ULL, 17400988720335968824ULL,
+    17138855889417480540ULL, 4507980080381370611ULL, 10703175719793781886ULL,
+    12598516658725890426ULL, 8353463412173898932ULL, 17703029389228422404ULL,
+    9313111267107226233ULL, 5441322942995154196ULL, 8952817660034465484ULL, 17571113341183703118ULL,
+    7375087953801067019ULL, 13381466302076453648ULL, 3218165271423914596ULL,
+    16956372157249382685ULL, 509080090049418841ULL, 13374233893294084913ULL, 2988537624204297086ULL,
+    4979195832939384620ULL, 3803931594068976394ULL, 10731535883829627646ULL,
+    12954845047607194278ULL, 10494298062560667399ULL, 4967351022190213065ULL,
+    13391917938145756456ULL, 951370484866918160ULL, 13531334179067685307ULL,
+    12868421357919390599ULL, 15918857042998130258ULL, 17769743831936974016ULL,
+    7137921979260368809ULL, 12461369180685892062ULL, 827476514081935199ULL, 15107282134224767230ULL,
+    10084765752802805748ULL, 3303739059392464407ULL, 17859532612136591428ULL,
+    10949414770405040164ULL, 12838613589371008785ULL, 5554397169231540728ULL,
+    18375114572169624408ULL, 15649286703242390139ULL, 2957281557463706877ULL,
+    14000350446219393213ULL, 14355199721749620351ULL, 2730856240099299695ULL,
+    17528131000714705752ULL, 2537498525883536360ULL, 6121058967084509393ULL,
+    16897667060435514221ULL, 12367869599571112440ULL, 3388831797050807508ULL,
+    16791449724090982798ULL, 2673426123453294928ULL, 11369313542384405846ULL,
+    15641960333586432634ULL, 15080962589658958379ULL, 7747943772340226569ULL,
+    8075023376199159152ULL, 8485093027378306528ULL, 13503706844122243648ULL, 8401961362938086226ULL,
+    8125426002124226402ULL, 9005399361407785203ULL, 6847968030066906634ULL, 11934937736309295197ULL,
+    5116750888594772351ULL, 2817039227179245227ULL, 17724206901239332980ULL, 4985702708254058578ULL,
+    5786345435756642871ULL, 17772527414940936938ULL, 1201320251272957006ULL,
+    15787430120324348129ULL, 6305488781359965661ULL, 12423900845502858433ULL,
+    17485949424202277720ULL, 2062237315546855852ULL, 10353639467860902375ULL,
+    2315398490451287299ULL, 15394572894814882621ULL, 232866113801165640ULL, 7413443736109338926ULL,
+    902719806551551191ULL, 16568853118619045174ULL, 14202214862428279177ULL,
+    11719595395278861192ULL, 5890053236389907647ULL, 9996196494965833627ULL,
+    12967056942364782577ULL, 9034128755157395787ULL, 17898204904710512655ULL,
+    8229373445062993977ULL, 13580036169519833644ULL
   };
 
 #if defined(__cplusplus)
diff --git a/info.txt b/info.txt
index af3dbf98..61cb7666 100644
--- a/info.txt
+++ b/info.txt
@@ -1,5 +1,5 @@
 The code was generated with the following toolchain.
-F* version: bc622701c668f6b4092760879372968265d4a4e1
-KaRaMeL version: 7cffd27cfefbd220e986e561e8d350f043609f76
-HACL* version: 1b30697fc2b0d8d5e2f541eccfd3fb52b45b905c
+F* version: f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f
+Karamel version: a7be2a7c43eca637ceb57fe8f3ffd16fc6627ebd
+HACL* version: a73f8d6451c15f0ae3df1ba1b1c223f36015b3cd
 Vale version: 0.3.19
diff --git a/js/api.js b/js/api.js
index bd8c6c0d..5ad3bdde 100644
--- a/js/api.js
+++ b/js/api.js
@@ -217,9 +217,9 @@ var HaclWasm = (function() {
   // We defined a few WASM-specific "compile-time macros".
   var my_imports = {
     EverCrypt_TargetConfig: (mem) => ({
-      hacl_can_compile_vale: 0,
-      hacl_can_compile_vec128: 0,
-      hacl_can_compile_vec256: 0,
+      HACL_CAN_COMPILE_VALE: 0,
+      HACL_CAN_COMPILE_VEC128: 0,
+      HACL_CAN_COMPILE_VEC256: 0,
       has_vec128_not_avx: () => false,
       has_vec256_not_avx2: () => false,
     }),
diff --git a/karamel/include/krml/internal/builtin.h b/karamel/include/krml/internal/builtin.h
index 53ea9040..6098f30b 100644
--- a/karamel/include/krml/internal/builtin.h
+++ b/karamel/include/krml/internal/builtin.h
@@ -7,6 +7,8 @@
 /* For alloca, when using KaRaMeL's -falloca */
 #if (defined(_WIN32) || defined(_WIN64))
 #  include <malloc.h>
+#elif (defined(sun))
+#  include <alloca.h>
 #endif
 
 /* If some globals need to be initialized before the main, then karamel will
diff --git a/karamel/include/krml/internal/target.h b/karamel/include/krml/internal/target.h
index 4903d224..d4252a10 100644
--- a/karamel/include/krml/internal/target.h
+++ b/karamel/include/krml/internal/target.h
@@ -57,6 +57,10 @@
 #  define KRML_HOST_IGNORE(x) (void)(x)
 #endif
 
+#ifndef KRML_MAYBE_UNUSED_VAR
+#  define KRML_MAYBE_UNUSED_VAR(x) KRML_HOST_IGNORE(x)
+#endif
+
 #ifndef KRML_MAYBE_UNUSED
 #  if defined(__GNUC__)
 #    define KRML_MAYBE_UNUSED __attribute__((unused))
diff --git a/karamel/krmllib/dist/minimal/FStar_UInt128_Verified.h b/karamel/krmllib/dist/minimal/FStar_UInt128_Verified.h
index 4bf0a34a..9e4e2290 100644
--- a/karamel/krmllib/dist/minimal/FStar_UInt128_Verified.h
+++ b/karamel/krmllib/dist/minimal/FStar_UInt128_Verified.h
@@ -15,7 +15,7 @@
 
 static inline uint64_t FStar_UInt128_constant_time_carry(uint64_t a, uint64_t b)
 {
-  return (a ^ ((a ^ b) | ((a - b) ^ b))) >> (uint32_t)63U;
+  return (a ^ ((a ^ b) | ((a - b) ^ b))) >> 63U;
 }
 
 static inline uint64_t FStar_UInt128_carry(uint64_t a, uint64_t b)
@@ -118,7 +118,7 @@ static inline FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a
   return lit;
 }
 
-static uint32_t FStar_UInt128_u32_64 = (uint32_t)64U;
+static uint32_t FStar_UInt128_u32_64 = 64U;
 
 static inline uint64_t FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s)
 {
@@ -134,7 +134,7 @@ FStar_UInt128_add_u64_shift_left_respec(uint64_t hi, uint64_t lo, uint32_t s)
 static inline FStar_UInt128_uint128
 FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
 {
-  if (s == (uint32_t)0U)
+  if (s == 0U)
   {
     return a;
   }
@@ -151,7 +151,7 @@ static inline FStar_UInt128_uint128
 FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s)
 {
   FStar_UInt128_uint128 lit;
-  lit.low = (uint64_t)0U;
+  lit.low = 0ULL;
   lit.high = a.low << (s - FStar_UInt128_u32_64);
   return lit;
 }
@@ -183,7 +183,7 @@ FStar_UInt128_add_u64_shift_right_respec(uint64_t hi, uint64_t lo, uint32_t s)
 static inline FStar_UInt128_uint128
 FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
 {
-  if (s == (uint32_t)0U)
+  if (s == 0U)
   {
     return a;
   }
@@ -201,7 +201,7 @@ FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s)
 {
   FStar_UInt128_uint128 lit;
   lit.low = a.high >> (s - FStar_UInt128_u32_64);
-  lit.high = (uint64_t)0U;
+  lit.high = 0ULL;
   return lit;
 }
 
@@ -269,7 +269,7 @@ static inline FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a)
 {
   FStar_UInt128_uint128 lit;
   lit.low = a;
-  lit.high = (uint64_t)0U;
+  lit.high = 0ULL;
   return lit;
 }
 
@@ -280,10 +280,10 @@ static inline uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a)
 
 static inline uint64_t FStar_UInt128_u64_mod_32(uint64_t a)
 {
-  return a & (uint64_t)0xffffffffU;
+  return a & 0xffffffffULL;
 }
 
-static uint32_t FStar_UInt128_u32_32 = (uint32_t)32U;
+static uint32_t FStar_UInt128_u32_32 = 32U;
 
 static inline uint64_t FStar_UInt128_u32_combine(uint64_t hi, uint64_t lo)
 {
diff --git a/karamel/krmllib/dist/minimal/FStar_UInt_8_16_32_64.h b/karamel/krmllib/dist/minimal/FStar_UInt_8_16_32_64.h
index 84356c61..56a2454f 100644
--- a/karamel/krmllib/dist/minimal/FStar_UInt_8_16_32_64.h
+++ b/karamel/krmllib/dist/minimal/FStar_UInt_8_16_32_64.h
@@ -35,10 +35,10 @@ extern uint32_t FStar_UInt64_n_minus_one;
 static KRML_NOINLINE uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
 {
   uint64_t x = a ^ b;
-  uint64_t minus_x = ~x + (uint64_t)1U;
+  uint64_t minus_x = ~x + 1ULL;
   uint64_t x_or_minus_x = x | minus_x;
-  uint64_t xnx = x_or_minus_x >> (uint32_t)63U;
-  return xnx - (uint64_t)1U;
+  uint64_t xnx = x_or_minus_x >> 63U;
+  return xnx - 1ULL;
 }
 
 static KRML_NOINLINE uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
@@ -50,8 +50,8 @@ static KRML_NOINLINE uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
   uint64_t x_sub_y_xor_y = x_sub_y ^ y;
   uint64_t q = x_xor_y | x_sub_y_xor_y;
   uint64_t x_xor_q = x ^ q;
-  uint64_t x_xor_q_ = x_xor_q >> (uint32_t)63U;
-  return x_xor_q_ - (uint64_t)1U;
+  uint64_t x_xor_q_ = x_xor_q >> 63U;
+  return x_xor_q_ - 1ULL;
 }
 
 extern Prims_string FStar_UInt64_to_string(uint64_t uu___);
@@ -83,10 +83,10 @@ extern uint32_t FStar_UInt32_n_minus_one;
 static KRML_NOINLINE uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
 {
   uint32_t x = a ^ b;
-  uint32_t minus_x = ~x + (uint32_t)1U;
+  uint32_t minus_x = ~x + 1U;
   uint32_t x_or_minus_x = x | minus_x;
-  uint32_t xnx = x_or_minus_x >> (uint32_t)31U;
-  return xnx - (uint32_t)1U;
+  uint32_t xnx = x_or_minus_x >> 31U;
+  return xnx - 1U;
 }
 
 static KRML_NOINLINE uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
@@ -98,8 +98,8 @@ static KRML_NOINLINE uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
   uint32_t x_sub_y_xor_y = x_sub_y ^ y;
   uint32_t q = x_xor_y | x_sub_y_xor_y;
   uint32_t x_xor_q = x ^ q;
-  uint32_t x_xor_q_ = x_xor_q >> (uint32_t)31U;
-  return x_xor_q_ - (uint32_t)1U;
+  uint32_t x_xor_q_ = x_xor_q >> 31U;
+  return x_xor_q_ - 1U;
 }
 
 extern Prims_string FStar_UInt32_to_string(uint32_t uu___);
@@ -130,24 +130,24 @@ extern uint32_t FStar_UInt16_n_minus_one;
 
 static KRML_NOINLINE uint16_t FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
 {
-  uint16_t x = a ^ b;
-  uint16_t minus_x = ~x + (uint16_t)1U;
-  uint16_t x_or_minus_x = x | minus_x;
-  uint16_t xnx = x_or_minus_x >> (uint32_t)15U;
-  return xnx - (uint16_t)1U;
+  uint16_t x = (uint32_t)a ^ (uint32_t)b;
+  uint16_t minus_x = (uint32_t)~x + 1U;
+  uint16_t x_or_minus_x = (uint32_t)x | (uint32_t)minus_x;
+  uint16_t xnx = (uint32_t)x_or_minus_x >> 15U;
+  return (uint32_t)xnx - 1U;
 }
 
 static KRML_NOINLINE uint16_t FStar_UInt16_gte_mask(uint16_t a, uint16_t b)
 {
   uint16_t x = a;
   uint16_t y = b;
-  uint16_t x_xor_y = x ^ y;
-  uint16_t x_sub_y = x - y;
-  uint16_t x_sub_y_xor_y = x_sub_y ^ y;
-  uint16_t q = x_xor_y | x_sub_y_xor_y;
-  uint16_t x_xor_q = x ^ q;
-  uint16_t x_xor_q_ = x_xor_q >> (uint32_t)15U;
-  return x_xor_q_ - (uint16_t)1U;
+  uint16_t x_xor_y = (uint32_t)x ^ (uint32_t)y;
+  uint16_t x_sub_y = (uint32_t)x - (uint32_t)y;
+  uint16_t x_sub_y_xor_y = (uint32_t)x_sub_y ^ (uint32_t)y;
+  uint16_t q = (uint32_t)x_xor_y | (uint32_t)x_sub_y_xor_y;
+  uint16_t x_xor_q = (uint32_t)x ^ (uint32_t)q;
+  uint16_t x_xor_q_ = (uint32_t)x_xor_q >> 15U;
+  return (uint32_t)x_xor_q_ - 1U;
 }
 
 extern Prims_string FStar_UInt16_to_string(uint16_t uu___);
@@ -178,24 +178,24 @@ extern uint32_t FStar_UInt8_n_minus_one;
 
 static KRML_NOINLINE uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
 {
-  uint8_t x = a ^ b;
-  uint8_t minus_x = ~x + (uint8_t)1U;
-  uint8_t x_or_minus_x = x | minus_x;
-  uint8_t xnx = x_or_minus_x >> (uint32_t)7U;
-  return xnx - (uint8_t)1U;
+  uint8_t x = (uint32_t)a ^ (uint32_t)b;
+  uint8_t minus_x = (uint32_t)~x + 1U;
+  uint8_t x_or_minus_x = (uint32_t)x | (uint32_t)minus_x;
+  uint8_t xnx = (uint32_t)x_or_minus_x >> 7U;
+  return (uint32_t)xnx - 1U;
 }
 
 static KRML_NOINLINE uint8_t FStar_UInt8_gte_mask(uint8_t a, uint8_t b)
 {
   uint8_t x = a;
   uint8_t y = b;
-  uint8_t x_xor_y = x ^ y;
-  uint8_t x_sub_y = x - y;
-  uint8_t x_sub_y_xor_y = x_sub_y ^ y;
-  uint8_t q = x_xor_y | x_sub_y_xor_y;
-  uint8_t x_xor_q = x ^ q;
-  uint8_t x_xor_q_ = x_xor_q >> (uint32_t)7U;
-  return x_xor_q_ - (uint8_t)1U;
+  uint8_t x_xor_y = (uint32_t)x ^ (uint32_t)y;
+  uint8_t x_sub_y = (uint32_t)x - (uint32_t)y;
+  uint8_t x_sub_y_xor_y = (uint32_t)x_sub_y ^ (uint32_t)y;
+  uint8_t q = (uint32_t)x_xor_y | (uint32_t)x_sub_y_xor_y;
+  uint8_t x_xor_q = (uint32_t)x ^ (uint32_t)q;
+  uint8_t x_xor_q_ = (uint32_t)x_xor_q >> 7U;
+  return (uint32_t)x_xor_q_ - 1U;
 }
 
 extern Prims_string FStar_UInt8_to_string(uint8_t uu___);
diff --git a/ocaml/lib/Hacl_Bignum_bindings.ml b/ocaml/lib/Hacl_Bignum_bindings.ml
index 4674e2b4..0102681f 100644
--- a/ocaml/lib/Hacl_Bignum_bindings.ml
+++ b/ocaml/lib/Hacl_Bignum_bindings.ml
@@ -62,12 +62,6 @@ module Bindings(F:Cstubs.FOREIGN) =
         (uint32_t @->
            (uint32_t @->
               ((ptr uint32_t) @-> ((ptr uint32_t) @-> (returning void)))))
-    let hacl_Bignum_Montgomery_bn_mont_reduction_u32 =
-      foreign "Hacl_Bignum_Montgomery_bn_mont_reduction_u32"
-        (uint32_t @->
-           ((ptr uint32_t) @->
-              (uint32_t @->
-                 ((ptr uint32_t) @-> ((ptr uint32_t) @-> (returning void))))))
     let hacl_Bignum_Montgomery_bn_to_mont_u32 =
       foreign "Hacl_Bignum_Montgomery_bn_to_mont_u32"
         (uint32_t @->
@@ -102,12 +96,6 @@ module Bindings(F:Cstubs.FOREIGN) =
         (uint32_t @->
            (uint32_t @->
               ((ptr uint64_t) @-> ((ptr uint64_t) @-> (returning void)))))
-    let hacl_Bignum_Montgomery_bn_mont_reduction_u64 =
-      foreign "Hacl_Bignum_Montgomery_bn_mont_reduction_u64"
-        (uint32_t @->
-           ((ptr uint64_t) @->
-              (uint64_t @->
-                 ((ptr uint64_t) @-> ((ptr uint64_t) @-> (returning void))))))
     let hacl_Bignum_Montgomery_bn_to_mont_u64 =
       foreign "Hacl_Bignum_Montgomery_bn_to_mont_u64"
         (uint32_t @->
@@ -134,6 +122,18 @@ module Bindings(F:Cstubs.FOREIGN) =
            ((ptr uint64_t) @->
               (uint64_t @->
                  ((ptr uint64_t) @-> ((ptr uint64_t) @-> (returning void))))))
+    let hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32 =
+      foreign "Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32"
+        (uint32_t @->
+           ((ptr uint32_t) @->
+              (uint32_t @->
+                 ((ptr uint32_t) @-> ((ptr uint32_t) @-> (returning void))))))
+    let hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64 =
+      foreign "Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64"
+        (uint32_t @->
+           ((ptr uint64_t) @->
+              (uint64_t @->
+                 ((ptr uint64_t) @-> ((ptr uint64_t) @-> (returning void))))))
     let hacl_Bignum_Exponentiation_bn_check_mod_exp_u32 =
       foreign "Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32"
         (uint32_t @->
diff --git a/src/EverCrypt_AEAD.c b/src/EverCrypt_AEAD.c
index d3a4ffbe..b0fb4826 100644
--- a/src/EverCrypt_AEAD.c
+++ b/src/EverCrypt_AEAD.c
@@ -46,8 +46,8 @@ The state may be reused as many times as desired.
 */
 bool EverCrypt_AEAD_uu___is_Ek(Spec_Agile_AEAD_alg a, EverCrypt_AEAD_state_s projectee)
 {
-  KRML_HOST_IGNORE(a);
-  KRML_HOST_IGNORE(projectee);
+  KRML_MAYBE_UNUSED_VAR(a);
+  KRML_MAYBE_UNUSED_VAR(projectee);
   return true;
 }
 
@@ -86,11 +86,11 @@ Spec_Agile_AEAD_alg EverCrypt_AEAD_alg_of_state(EverCrypt_AEAD_state_s *s)
 static EverCrypt_Error_error_code
 create_in_chacha20_poly1305(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 {
-  uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+  uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
   EverCrypt_AEAD_state_s
   *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s));
   p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek });
-  memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(ek, k, 32U * sizeof (uint8_t));
   dst[0U] = p;
   return EverCrypt_Error_Success;
 }
@@ -98,8 +98,8 @@ create_in_chacha20_poly1305(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 static EverCrypt_Error_error_code
 create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 {
-  KRML_HOST_IGNORE(dst);
-  KRML_HOST_IGNORE(k);
+  KRML_MAYBE_UNUSED_VAR(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
   #if HACL_CAN_COMPILE_VALE
   bool has_aesni = EverCrypt_AutoConfig2_has_aesni();
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
@@ -108,11 +108,11 @@ create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
   bool has_movbe = EverCrypt_AutoConfig2_has_movbe();
   if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe)
   {
-    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)480U, sizeof (uint8_t));
+    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC(480U, sizeof (uint8_t));
     uint8_t *keys_b = ek;
-    uint8_t *hkeys_b = ek + (uint32_t)176U;
-    KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b));
-    KRML_HOST_IGNORE(aes128_keyhash_init(keys_b, hkeys_b));
+    uint8_t *hkeys_b = ek + 176U;
+    aes128_key_expansion(k, keys_b);
+    aes128_keyhash_init(keys_b, hkeys_b);
     EverCrypt_AEAD_state_s
     *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s));
     p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek });
@@ -128,8 +128,8 @@ create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 static EverCrypt_Error_error_code
 create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 {
-  KRML_HOST_IGNORE(dst);
-  KRML_HOST_IGNORE(k);
+  KRML_MAYBE_UNUSED_VAR(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
   #if HACL_CAN_COMPILE_VALE
   bool has_aesni = EverCrypt_AutoConfig2_has_aesni();
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
@@ -138,11 +138,11 @@ create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
   bool has_movbe = EverCrypt_AutoConfig2_has_movbe();
   if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe)
   {
-    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)544U, sizeof (uint8_t));
+    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC(544U, sizeof (uint8_t));
     uint8_t *keys_b = ek;
-    uint8_t *hkeys_b = ek + (uint32_t)240U;
-    KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b));
-    KRML_HOST_IGNORE(aes256_keyhash_init(keys_b, hkeys_b));
+    uint8_t *hkeys_b = ek + 240U;
+    aes256_key_expansion(k, keys_b);
+    aes256_keyhash_init(keys_b, hkeys_b);
     EverCrypt_AEAD_state_s
     *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s));
     p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek });
@@ -208,115 +208,106 @@ encrypt_aes128_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)304U;
+  uint8_t *scratch_b = ek + 304U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+  uint8_t *hkeys_b = ek1 + 176U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *plain_b_ = plain;
   uint8_t *out_b_ = cipher;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    plain + plain_len_,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+  memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+  if (len128x6 / 16ULL >= 18ULL)
   {
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    gcm128_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
-    KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
+    gcm128_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
-  memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
   return EverCrypt_Error_Success;
   #else
   KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n",
@@ -340,115 +331,106 @@ encrypt_aes256_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)368U;
+  uint8_t *scratch_b = ek + 368U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+  uint8_t *hkeys_b = ek1 + 240U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *plain_b_ = plain;
   uint8_t *out_b_ = cipher;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    plain + plain_len_,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+  memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+  if (len128x6 / 16ULL >= 18ULL)
   {
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    gcm256_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
-    KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
+    gcm256_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
-  memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
   return EverCrypt_Error_Success;
   #else
   KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n",
@@ -510,7 +492,7 @@ EverCrypt_AEAD_encrypt(
       }
     case Spec_Cipher_Expansion_Hacl_CHACHA20:
       {
-        if (iv_len != (uint32_t)12U)
+        if (iv_len != 12U)
         {
           return EverCrypt_Error_InvalidIVLength;
         }
@@ -546,124 +528,115 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[480U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-  KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 176U;
+  aes128_key_expansion(k, keys_b0);
+  aes128_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
   }
-  else if (iv_len == (uint32_t)0U)
+  else if (iv_len == 0U)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
   }
   else
   {
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)304U;
+    uint8_t *scratch_b = ek0 + 304U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+    uint8_t *hkeys_b = ek1 + 176U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *plain_b_ = plain;
     uint8_t *out_b_ = cipher;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      plain + plain_len_,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+    memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+    if (len128x6 / 16ULL >= 18ULL)
     {
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      gcm128_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
-      KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
+      gcm128_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
-    memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
     KRML_HOST_IGNORE(EverCrypt_Error_Success);
   }
   return EverCrypt_Error_Success;
@@ -697,124 +670,115 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[544U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-  KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 240U;
+  aes256_key_expansion(k, keys_b0);
+  aes256_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
   }
-  else if (iv_len == (uint32_t)0U)
+  else if (iv_len == 0U)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
   }
   else
   {
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)368U;
+    uint8_t *scratch_b = ek0 + 368U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+    uint8_t *hkeys_b = ek1 + 240U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *plain_b_ = plain;
     uint8_t *out_b_ = cipher;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      plain + plain_len_,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+    memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+    if (len128x6 / 16ULL >= 18ULL)
     {
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      gcm256_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
-      KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
+      gcm256_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
-    memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
     KRML_HOST_IGNORE(EverCrypt_Error_Success);
   }
   return EverCrypt_Error_Success;
@@ -840,15 +804,15 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -859,112 +823,103 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm(
   {
     uint8_t ek[480U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-    KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 176U;
+    aes128_key_expansion(k, keys_b0);
+    aes128_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
     }
-    else if (iv_len == (uint32_t)0U)
+    else if (iv_len == 0U)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
     }
     else
     {
       uint8_t *ek0 = (*s).ek;
-      uint8_t *scratch_b = ek0 + (uint32_t)304U;
+      uint8_t *scratch_b = ek0 + 304U;
       uint8_t *ek1 = ek0;
       uint8_t *keys_b = ek1;
-      uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+      uint8_t *hkeys_b = ek1 + 176U;
       uint8_t tmp_iv[16U] = { 0U };
-      uint32_t len = iv_len / (uint32_t)16U;
-      uint32_t bytes_len = len * (uint32_t)16U;
+      uint32_t len = iv_len / 16U;
+      uint32_t bytes_len = len * 16U;
       uint8_t *iv_b = iv;
-      memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-      KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-          (uint64_t)iv_len,
-          (uint64_t)len,
-          tmp_iv,
-          tmp_iv,
-          hkeys_b));
+      memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+      compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
       uint8_t *inout_b = scratch_b;
-      uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-      uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+      uint8_t *abytes_b = scratch_b + 16U;
+      uint8_t *scratch_b1 = scratch_b + 32U;
+      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
       uint8_t *plain_b_ = plain;
       uint8_t *out_b_ = cipher;
       uint8_t *auth_b_ = ad;
-      memcpy(inout_b,
-        plain + plain_len_,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-      memcpy(abytes_b,
-        ad + auth_len_,
-        (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-      uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-      if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+      memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+      memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+      uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+      if (len128x6 / 16ULL >= 18ULL)
       {
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
         uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128x6_ = len128x6 / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        gcm128_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
       else
       {
-        uint32_t len128x61 = (uint32_t)0U;
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+        uint32_t len128x61 = 0U;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + len128x61;
         uint8_t *out128_b = out_b_ + len128x61;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        uint64_t len128x6_ = (uint64_t)0U;
-        KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        uint64_t len128x6_ = 0ULL;
+        gcm128_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
-      memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+      memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
         inout_b,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+        (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
       KRML_HOST_IGNORE(EverCrypt_Error_Success);
     }
     return EverCrypt_Error_Success;
@@ -988,15 +943,15 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -1007,112 +962,103 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm(
   {
     uint8_t ek[544U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-    KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 240U;
+    aes256_key_expansion(k, keys_b0);
+    aes256_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
     }
-    else if (iv_len == (uint32_t)0U)
+    else if (iv_len == 0U)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
     }
     else
     {
       uint8_t *ek0 = (*s).ek;
-      uint8_t *scratch_b = ek0 + (uint32_t)368U;
+      uint8_t *scratch_b = ek0 + 368U;
       uint8_t *ek1 = ek0;
       uint8_t *keys_b = ek1;
-      uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+      uint8_t *hkeys_b = ek1 + 240U;
       uint8_t tmp_iv[16U] = { 0U };
-      uint32_t len = iv_len / (uint32_t)16U;
-      uint32_t bytes_len = len * (uint32_t)16U;
+      uint32_t len = iv_len / 16U;
+      uint32_t bytes_len = len * 16U;
       uint8_t *iv_b = iv;
-      memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-      KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-          (uint64_t)iv_len,
-          (uint64_t)len,
-          tmp_iv,
-          tmp_iv,
-          hkeys_b));
+      memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+      compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
       uint8_t *inout_b = scratch_b;
-      uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-      uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+      uint8_t *abytes_b = scratch_b + 16U;
+      uint8_t *scratch_b1 = scratch_b + 32U;
+      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
       uint8_t *plain_b_ = plain;
       uint8_t *out_b_ = cipher;
       uint8_t *auth_b_ = ad;
-      memcpy(inout_b,
-        plain + plain_len_,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-      memcpy(abytes_b,
-        ad + auth_len_,
-        (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-      uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-      if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+      memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+      memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+      uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+      if (len128x6 / 16ULL >= 18ULL)
       {
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
         uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128x6_ = len128x6 / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        gcm256_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
       else
       {
-        uint32_t len128x61 = (uint32_t)0U;
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+        uint32_t len128x61 = 0U;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + len128x61;
         uint8_t *out128_b = out_b_ + len128x61;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        uint64_t len128x6_ = (uint64_t)0U;
-        KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        uint64_t len128x6_ = 0ULL;
+        gcm256_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
-      memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+      memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
         inout_b,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+        (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
       KRML_HOST_IGNORE(EverCrypt_Error_Success);
     }
     return EverCrypt_Error_Success;
@@ -1136,10 +1082,10 @@ EverCrypt_AEAD_encrypt_expand_chacha20_poly1305(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(iv_len);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
   uint8_t ek[32U] = { 0U };
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek };
-  memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(ek, k, 32U * sizeof (uint8_t));
   EverCrypt_AEAD_state_s *s = &p;
   uint8_t *ek0 = (*s).ek;
   EverCrypt_Chacha20Poly1305_aead_encrypt(ek0, iv, ad_len, ad, plain_len, plain, cipher, tag);
@@ -1222,66 +1168,57 @@ decrypt_aes128_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)304U;
+  uint8_t *scratch_b = ek + 304U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+  uint8_t *hkeys_b = ek1 + 176U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1305,15 +1242,15 @@ decrypt_aes128_gcm(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1335,11 +1272,11 @@ decrypt_aes128_gcm(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1366,66 +1303,57 @@ decrypt_aes256_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)368U;
+  uint8_t *scratch_b = ek + 368U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+  uint8_t *hkeys_b = ek1 + 240U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1449,15 +1377,15 @@ decrypt_aes256_gcm(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1479,11 +1407,11 @@ decrypt_aes256_gcm(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1514,14 +1442,14 @@ decrypt_chacha20_poly1305(
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len != (uint32_t)12U)
+  if (iv_len != 12U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
   uint32_t
   r = EverCrypt_Chacha20Poly1305_aead_decrypt(ek, iv, ad_len, ad, cipher_len, dst, cipher, tag);
-  if (r == (uint32_t)0U)
+  if (r == 0U)
   {
     return EverCrypt_Error_Success;
   }
@@ -1620,73 +1548,64 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[480U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-  KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 176U;
+  aes128_key_expansion(k, keys_b0);
+  aes128_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek0 = (*s).ek;
-  uint8_t *scratch_b = ek0 + (uint32_t)304U;
+  uint8_t *scratch_b = ek0 + 304U;
   uint8_t *ek1 = ek0;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+  uint8_t *hkeys_b = ek1 + 176U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1710,15 +1629,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1740,11 +1659,11 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1779,73 +1698,64 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[544U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-  KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 240U;
+  aes256_key_expansion(k, keys_b0);
+  aes256_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek0 = (*s).ek;
-  uint8_t *scratch_b = ek0 + (uint32_t)368U;
+  uint8_t *scratch_b = ek0 + 368U;
   uint8_t *ek1 = ek0;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+  uint8_t *hkeys_b = ek1 + 240U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1869,15 +1779,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1899,11 +1809,11 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1930,15 +1840,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -1949,61 +1859,52 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
   {
     uint8_t ek[480U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-    KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 176U;
+    aes128_key_expansion(k, keys_b0);
+    aes128_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       return EverCrypt_Error_InvalidKey;
     }
-    if (iv_len == (uint32_t)0U)
+    if (iv_len == 0U)
     {
       return EverCrypt_Error_InvalidIVLength;
     }
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)304U;
+    uint8_t *scratch_b = ek0 + 304U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+    uint8_t *hkeys_b = ek1 + 176U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *cipher_b_ = cipher;
     uint8_t *out_b_ = dst;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      cipher + cipher_len_,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+    memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
     uint64_t c;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+    if (len128x6 / 16ULL >= 6ULL)
     {
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
       uint64_t
       c0 =
         gcm128_decrypt_opt(auth_b_,
@@ -2027,15 +1928,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
       uint64_t
       c0 =
         gcm128_decrypt_opt(auth_b_,
@@ -2057,11 +1958,11 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
           tag);
       c = c0;
     }
-    memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
     uint64_t r = c;
-    if (r == (uint64_t)0U)
+    if (r == 0ULL)
     {
       return EverCrypt_Error_Success;
     }
@@ -2086,15 +1987,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -2105,61 +2006,52 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
   {
     uint8_t ek[544U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-    KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 240U;
+    aes256_key_expansion(k, keys_b0);
+    aes256_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       return EverCrypt_Error_InvalidKey;
     }
-    if (iv_len == (uint32_t)0U)
+    if (iv_len == 0U)
     {
       return EverCrypt_Error_InvalidIVLength;
     }
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)368U;
+    uint8_t *scratch_b = ek0 + 368U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+    uint8_t *hkeys_b = ek1 + 240U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *cipher_b_ = cipher;
     uint8_t *out_b_ = dst;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      cipher + cipher_len_,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+    memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
     uint64_t c;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+    if (len128x6 / 16ULL >= 6ULL)
     {
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
       uint64_t
       c0 =
         gcm256_decrypt_opt(auth_b_,
@@ -2183,15 +2075,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
       uint64_t
       c0 =
         gcm256_decrypt_opt(auth_b_,
@@ -2213,11 +2105,11 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
           tag);
       c = c0;
     }
-    memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
     uint64_t r = c;
-    if (r == (uint64_t)0U)
+    if (r == 0ULL)
     {
       return EverCrypt_Error_Success;
     }
@@ -2244,7 +2136,7 @@ EverCrypt_AEAD_decrypt_expand_chacha20_poly1305(
 {
   uint8_t ek[32U] = { 0U };
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek };
-  memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(ek, k, 32U * sizeof (uint8_t));
   EverCrypt_AEAD_state_s *s = &p;
   EverCrypt_Error_error_code
   r = decrypt_chacha20_poly1305(s, iv, iv_len, ad, ad_len, cipher, cipher_len, tag, dst);
diff --git a/src/EverCrypt_AutoConfig2.c b/src/EverCrypt_AutoConfig2.c
index b549d020..5a92d995 100644
--- a/src/EverCrypt_AutoConfig2.c
+++ b/src/EverCrypt_AutoConfig2.c
@@ -113,59 +113,59 @@ void EverCrypt_AutoConfig2_recall(void)
 void EverCrypt_AutoConfig2_init(void)
 {
   #if HACL_CAN_COMPILE_VALE
-  if (check_aesni() != (uint64_t)0U)
+  if (check_aesni() != 0ULL)
   {
     cpu_has_aesni[0U] = true;
     cpu_has_pclmulqdq[0U] = true;
   }
-  if (check_sha() != (uint64_t)0U)
+  if (check_sha() != 0ULL)
   {
     cpu_has_shaext[0U] = true;
   }
-  if (check_adx_bmi2() != (uint64_t)0U)
+  if (check_adx_bmi2() != 0ULL)
   {
     cpu_has_bmi2[0U] = true;
     cpu_has_adx[0U] = true;
   }
-  if (check_avx() != (uint64_t)0U)
+  if (check_avx() != 0ULL)
   {
-    if (check_osxsave() != (uint64_t)0U)
+    if (check_osxsave() != 0ULL)
     {
-      if (check_avx_xcr0() != (uint64_t)0U)
+      if (check_avx_xcr0() != 0ULL)
       {
         cpu_has_avx[0U] = true;
       }
     }
   }
-  if (check_avx2() != (uint64_t)0U)
+  if (check_avx2() != 0ULL)
   {
-    if (check_osxsave() != (uint64_t)0U)
+    if (check_osxsave() != 0ULL)
     {
-      if (check_avx_xcr0() != (uint64_t)0U)
+      if (check_avx_xcr0() != 0ULL)
       {
         cpu_has_avx2[0U] = true;
       }
     }
   }
-  if (check_sse() != (uint64_t)0U)
+  if (check_sse() != 0ULL)
   {
     cpu_has_sse[0U] = true;
   }
-  if (check_movbe() != (uint64_t)0U)
+  if (check_movbe() != 0ULL)
   {
     cpu_has_movbe[0U] = true;
   }
-  if (check_rdrand() != (uint64_t)0U)
+  if (check_rdrand() != 0ULL)
   {
     cpu_has_rdrand[0U] = true;
   }
-  if (check_avx512() != (uint64_t)0U)
+  if (check_avx512() != 0ULL)
   {
-    if (check_osxsave() != (uint64_t)0U)
+    if (check_osxsave() != 0ULL)
     {
-      if (check_avx_xcr0() != (uint64_t)0U)
+      if (check_avx_xcr0() != 0ULL)
       {
-        if (check_avx512_xcr0() != (uint64_t)0U)
+        if (check_avx512_xcr0() != 0ULL)
         {
           cpu_has_avx512[0U] = true;
           return;
diff --git a/src/EverCrypt_Chacha20Poly1305.c b/src/EverCrypt_Chacha20Poly1305.c
index 9a110bbf..0ff2d448 100644
--- a/src/EverCrypt_Chacha20Poly1305.c
+++ b/src/EverCrypt_Chacha20Poly1305.c
@@ -44,7 +44,7 @@ EverCrypt_Chacha20Poly1305_aead_encrypt(
   #if HACL_CAN_COMPILE_VEC256
   if (vec256)
   {
-    KRML_HOST_IGNORE(vec128);
+    KRML_MAYBE_UNUSED_VAR(vec128);
     Hacl_Chacha20Poly1305_256_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
     return;
   }
@@ -52,13 +52,13 @@ EverCrypt_Chacha20Poly1305_aead_encrypt(
   #if HACL_CAN_COMPILE_VEC128
   if (vec128)
   {
-    KRML_HOST_IGNORE(vec256);
+    KRML_MAYBE_UNUSED_VAR(vec256);
     Hacl_Chacha20Poly1305_128_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
     return;
   }
   #endif
-  KRML_HOST_IGNORE(vec128);
-  KRML_HOST_IGNORE(vec256);
+  KRML_MAYBE_UNUSED_VAR(vec128);
+  KRML_MAYBE_UNUSED_VAR(vec256);
   Hacl_Chacha20Poly1305_32_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
 }
 
@@ -79,19 +79,19 @@ EverCrypt_Chacha20Poly1305_aead_decrypt(
   #if HACL_CAN_COMPILE_VEC256
   if (vec256)
   {
-    KRML_HOST_IGNORE(vec128);
+    KRML_MAYBE_UNUSED_VAR(vec128);
     return Hacl_Chacha20Poly1305_256_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
   }
   #endif
   #if HACL_CAN_COMPILE_VEC128
   if (vec128)
   {
-    KRML_HOST_IGNORE(vec256);
+    KRML_MAYBE_UNUSED_VAR(vec256);
     return Hacl_Chacha20Poly1305_128_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
   }
   #endif
-  KRML_HOST_IGNORE(vec128);
-  KRML_HOST_IGNORE(vec256);
+  KRML_MAYBE_UNUSED_VAR(vec128);
+  KRML_MAYBE_UNUSED_VAR(vec256);
   return Hacl_Chacha20Poly1305_32_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
 }
 
diff --git a/src/EverCrypt_DRBG.c b/src/EverCrypt_DRBG.c
index 13e517e5..301fe528 100644
--- a/src/EverCrypt_DRBG.c
+++ b/src/EverCrypt_DRBG.c
@@ -28,15 +28,15 @@
 #include "internal/EverCrypt_HMAC.h"
 #include "lib_memzero0.h"
 
-uint32_t EverCrypt_DRBG_reseed_interval = (uint32_t)1024U;
+uint32_t EverCrypt_DRBG_reseed_interval = 1024U;
 
-uint32_t EverCrypt_DRBG_max_output_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_output_length = 65536U;
 
-uint32_t EverCrypt_DRBG_max_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_length = 65536U;
 
-uint32_t EverCrypt_DRBG_max_personalization_string_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_personalization_string_length = 65536U;
 
-uint32_t EverCrypt_DRBG_max_additional_input_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_additional_input_length = 65536U;
 
 uint32_t EverCrypt_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
 {
@@ -44,19 +44,19 @@ uint32_t EverCrypt_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     default:
       {
@@ -92,7 +92,7 @@ EverCrypt_DRBG_uu___is_SHA1_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA1_s)
   {
     return true;
@@ -106,7 +106,7 @@ EverCrypt_DRBG_uu___is_SHA2_256_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA2_256_s)
   {
     return true;
@@ -120,7 +120,7 @@ EverCrypt_DRBG_uu___is_SHA2_384_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA2_384_s)
   {
     return true;
@@ -134,7 +134,7 @@ EverCrypt_DRBG_uu___is_SHA2_512_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA2_512_s)
   {
     return true;
@@ -149,10 +149,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -164,10 +164,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -179,10 +179,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -194,10 +194,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -247,7 +247,7 @@ instantiate_sha1(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t entropy[min_entropy];
@@ -282,45 +282,43 @@ instantiate_sha1(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)20U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)20U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 20U * sizeof (uint8_t));
+  memset(v, 1U, 20U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 21U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 20U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)21U,
+    memcpy(input0 + 21U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[20U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-  EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-  memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[20U] = 0U;
+  EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+  EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+  memcpy(k, k_, 20U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 21U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 20U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)21U,
+      memcpy(input + 21U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[20U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-    EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+    input[20U] = 1U;
+    EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+    EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+    memcpy(k, k_0, 20U * sizeof (uint8_t));
   }
   return true;
 }
@@ -337,7 +335,7 @@ instantiate_sha2_256(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t entropy[min_entropy];
@@ -372,45 +370,43 @@ instantiate_sha2_256(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)32U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)32U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 32U * sizeof (uint8_t));
+  memset(v, 1U, 32U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 33U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 32U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)33U,
+    memcpy(input0 + 33U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[32U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-  memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[32U] = 0U;
+  EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+  memcpy(k, k_, 32U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 33U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 32U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)33U,
+      memcpy(input + 33U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[32U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+    input[32U] = 1U;
+    EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+    memcpy(k, k_0, 32U * sizeof (uint8_t));
   }
   return true;
 }
@@ -427,7 +423,7 @@ instantiate_sha2_384(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t entropy[min_entropy];
@@ -462,45 +458,43 @@ instantiate_sha2_384(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)48U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)48U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 48U * sizeof (uint8_t));
+  memset(v, 1U, 48U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 49U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 48U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)49U,
+    memcpy(input0 + 49U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[48U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-  memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[48U] = 0U;
+  EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+  memcpy(k, k_, 48U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 49U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 48U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)49U,
+      memcpy(input + 49U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[48U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+    input[48U] = 1U;
+    EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+    memcpy(k, k_0, 48U * sizeof (uint8_t));
   }
   return true;
 }
@@ -517,7 +511,7 @@ instantiate_sha2_512(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t entropy[min_entropy];
@@ -552,45 +546,43 @@ instantiate_sha2_512(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)64U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)64U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 64U * sizeof (uint8_t));
+  memset(v, 1U, 64U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 65U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 64U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)65U,
+    memcpy(input0 + 65U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[64U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-  memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[64U] = 0U;
+  EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+  memcpy(k, k_, 64U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 65U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 64U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)65U,
+      memcpy(input + 65U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[64U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+    input[64U] = 1U;
+    EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+    memcpy(k, k_0, 64U * sizeof (uint8_t));
   }
   return true;
 }
@@ -635,42 +627,42 @@ reseed_sha1(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 21U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 20U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)21U,
+    memcpy(input0 + 21U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[20U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-  EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-  memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[20U] = 0U;
+  EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+  EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+  memcpy(k, k_, 20U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)21U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 21U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 20U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)21U,
+      memcpy(input + 21U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[20U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-    EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+    input[20U] = 1U;
+    EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+    EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+    memcpy(k, k_0, 20U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -714,42 +706,42 @@ reseed_sha2_256(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 33U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 32U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)33U,
+    memcpy(input0 + 33U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[32U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-  memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[32U] = 0U;
+  EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+  memcpy(k, k_, 32U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)33U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 33U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 32U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)33U,
+      memcpy(input + 33U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[32U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+    input[32U] = 1U;
+    EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+    memcpy(k, k_0, 32U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -793,42 +785,42 @@ reseed_sha2_384(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 49U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 48U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)49U,
+    memcpy(input0 + 49U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[48U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-  memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[48U] = 0U;
+  EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+  memcpy(k, k_, 48U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)49U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 49U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 48U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)49U,
+      memcpy(input + 49U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[48U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+    input[48U] = 1U;
+    EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+    memcpy(k, k_0, 48U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -872,42 +864,42 @@ reseed_sha2_512(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 65U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 64U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)65U,
+    memcpy(input0 + 65U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[64U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-  memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[64U] = 0U;
+  EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+  memcpy(k, k_, 64U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)65U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 65U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 64U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)65U,
+      memcpy(input + 65U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[64U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+    input[64U] = 1U;
+    EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+    memcpy(k, k_0, 64U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -970,42 +962,42 @@ generate_sha1(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 21U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t input0[input_len];
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 20U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)21U,
+        memcpy(input0 + 21U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[20U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-      EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-      memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[20U] = 0U;
+      EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+      EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+      memcpy(k, k_, 20U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)21U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 21U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t input[input_len0];
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 20U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)21U,
+          memcpy(input + 21U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[20U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-        EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+        input[20U] = 1U;
+        EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+        EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+        memcpy(k, k_0, 20U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1015,16 +1007,16 @@ generate_sha1(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA1_s)
   {
-    x1 = st_s.case_SHA1_s;
+    ite = st_s.case_SHA1_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1040,87 +1032,87 @@ generate_sha1(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)21U + additional_input_len;
+    uint32_t input_len = 21U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t input0[input_len];
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 20U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[20U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-    EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[20U] = 0U;
+    EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+    EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+    memcpy(k, k_, 20U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+      uint32_t input_len0 = 21U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t input[input_len0];
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 20U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[20U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-      EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-      memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+      input[20U] = 1U;
+      EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+      EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+      memcpy(k, k_0, 20U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)20U;
+  uint32_t max = n / 20U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(out + i * (uint32_t)20U, v, (uint32_t)20U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha1(v, k, 20U, v, 20U);
+    memcpy(out + i * 20U, v, 20U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)20U < n)
+  if (max * 20U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)20U;
-    EverCrypt_HMAC_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(block, v, (n - max * (uint32_t)20U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 20U;
+    EverCrypt_HMAC_compute_sha1(v, k, 20U, v, 20U);
+    memcpy(block, v, (n - max * 20U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)21U + additional_input_len;
+  uint32_t input_len = 21U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 20U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[20U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-  EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-  memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[20U] = 0U;
+  EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+  EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+  memcpy(k, k_, 20U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+    uint32_t input_len0 = 21U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 20U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[20U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-    EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+    input[20U] = 1U;
+    EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+    EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+    memcpy(k, k_0, 20U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1183,42 +1175,42 @@ generate_sha2_256(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 33U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t input0[input_len];
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 32U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)33U,
+        memcpy(input0 + 33U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[32U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-      EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-      memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[32U] = 0U;
+      EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+      EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+      memcpy(k, k_, 32U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)33U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 33U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t input[input_len0];
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 32U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)33U,
+          memcpy(input + 33U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[32U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-        EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+        input[32U] = 1U;
+        EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+        EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+        memcpy(k, k_0, 32U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1228,16 +1220,16 @@ generate_sha2_256(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA2_256_s)
   {
-    x1 = st_s.case_SHA2_256_s;
+    ite = st_s.case_SHA2_256_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1253,87 +1245,87 @@ generate_sha2_256(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)33U + additional_input_len;
+    uint32_t input_len = 33U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t input0[input_len];
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 32U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[32U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-    EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[32U] = 0U;
+    EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+    EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+    memcpy(k, k_, 32U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+      uint32_t input_len0 = 33U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t input[input_len0];
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 32U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[32U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-      EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-      memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+      input[32U] = 1U;
+      EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+      EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+      memcpy(k, k_0, 32U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)32U;
+  uint32_t max = n / 32U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(out + i * (uint32_t)32U, v, (uint32_t)32U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+    memcpy(out + i * 32U, v, 32U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)32U < n)
+  if (max * 32U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)32U;
-    EverCrypt_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(block, v, (n - max * (uint32_t)32U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 32U;
+    EverCrypt_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+    memcpy(block, v, (n - max * 32U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)33U + additional_input_len;
+  uint32_t input_len = 33U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 32U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[32U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-  memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[32U] = 0U;
+  EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+  memcpy(k, k_, 32U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+    uint32_t input_len0 = 33U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 32U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[32U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+    input[32U] = 1U;
+    EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+    memcpy(k, k_0, 32U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1396,42 +1388,42 @@ generate_sha2_384(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 49U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t input0[input_len];
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 48U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)49U,
+        memcpy(input0 + 49U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[48U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-      EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-      memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[48U] = 0U;
+      EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+      EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+      memcpy(k, k_, 48U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)49U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 49U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t input[input_len0];
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 48U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)49U,
+          memcpy(input + 49U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[48U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-        EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+        input[48U] = 1U;
+        EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+        EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+        memcpy(k, k_0, 48U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1441,16 +1433,16 @@ generate_sha2_384(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA2_384_s)
   {
-    x1 = st_s.case_SHA2_384_s;
+    ite = st_s.case_SHA2_384_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1466,87 +1458,87 @@ generate_sha2_384(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)49U + additional_input_len;
+    uint32_t input_len = 49U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t input0[input_len];
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 48U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[48U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-    EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[48U] = 0U;
+    EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+    EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+    memcpy(k, k_, 48U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+      uint32_t input_len0 = 49U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t input[input_len0];
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 48U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[48U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-      EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-      memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+      input[48U] = 1U;
+      EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+      EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+      memcpy(k, k_0, 48U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)48U;
+  uint32_t max = n / 48U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(out + i * (uint32_t)48U, v, (uint32_t)48U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+    memcpy(out + i * 48U, v, 48U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)48U < n)
+  if (max * 48U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)48U;
-    EverCrypt_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(block, v, (n - max * (uint32_t)48U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 48U;
+    EverCrypt_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+    memcpy(block, v, (n - max * 48U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)49U + additional_input_len;
+  uint32_t input_len = 49U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 48U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[48U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-  memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[48U] = 0U;
+  EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+  memcpy(k, k_, 48U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+    uint32_t input_len0 = 49U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 48U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[48U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+    input[48U] = 1U;
+    EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+    memcpy(k, k_0, 48U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1609,42 +1601,42 @@ generate_sha2_512(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 65U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t input0[input_len];
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 64U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)65U,
+        memcpy(input0 + 65U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[64U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-      EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-      memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[64U] = 0U;
+      EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+      EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+      memcpy(k, k_, 64U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)65U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 65U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t input[input_len0];
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 64U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)65U,
+          memcpy(input + 65U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[64U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-        EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+        input[64U] = 1U;
+        EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+        EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+        memcpy(k, k_0, 64U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1654,16 +1646,16 @@ generate_sha2_512(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA2_512_s)
   {
-    x1 = st_s.case_SHA2_512_s;
+    ite = st_s.case_SHA2_512_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1679,87 +1671,87 @@ generate_sha2_512(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)65U + additional_input_len;
+    uint32_t input_len = 65U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t input0[input_len];
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 64U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[64U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-    EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[64U] = 0U;
+    EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+    EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+    memcpy(k, k_, 64U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+      uint32_t input_len0 = 65U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t input[input_len0];
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 64U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[64U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-      EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-      memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+      input[64U] = 1U;
+      EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+      EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+      memcpy(k, k_0, 64U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)64U;
+  uint32_t max = n / 64U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(out + i * (uint32_t)64U, v, (uint32_t)64U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+    memcpy(out + i * 64U, v, 64U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)64U < n)
+  if (max * 64U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)64U;
-    EverCrypt_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(block, v, (n - max * (uint32_t)64U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 64U;
+    EverCrypt_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+    memcpy(block, v, (n - max * 64U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)65U + additional_input_len;
+  uint32_t input_len = 65U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 64U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[64U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-  memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[64U] = 0U;
+  EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+  memcpy(k, k_, 64U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+    uint32_t input_len0 = 65U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 64U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[64U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+    input[64U] = 1U;
+    EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+    memcpy(k, k_0, 64U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1778,9 +1770,9 @@ static void uninstantiate_sha1(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)20U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)20U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 20U, uint8_t);
+  Lib_Memzero0_memzero(v, 20U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
@@ -1802,9 +1794,9 @@ static void uninstantiate_sha2_256(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)32U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 32U, uint8_t);
+  Lib_Memzero0_memzero(v, 32U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
@@ -1826,9 +1818,9 @@ static void uninstantiate_sha2_384(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)48U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)48U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 48U, uint8_t);
+  Lib_Memzero0_memzero(v, 48U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
@@ -1850,9 +1842,9 @@ static void uninstantiate_sha2_512(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)64U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)64U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 64U, uint8_t);
+  Lib_Memzero0_memzero(v, 64U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
diff --git a/src/EverCrypt_HKDF.c b/src/EverCrypt_HKDF.c
index 796a8424..773f86b8 100644
--- a/src/EverCrypt_HKDF.c
+++ b/src/EverCrypt_HKDF.c
@@ -37,39 +37,39 @@ expand_sha1(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)20U;
+  uint32_t tlen = 20U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -92,39 +92,39 @@ expand_sha2_256(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -147,39 +147,39 @@ expand_sha2_384(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)48U;
+  uint32_t tlen = 48U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -202,39 +202,39 @@ expand_sha2_512(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -257,39 +257,39 @@ expand_blake2s(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -312,39 +312,39 @@ expand_blake2b(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/EverCrypt_HMAC.c b/src/EverCrypt_HMAC.c
index 91440e61..b2df0edb 100644
--- a/src/EverCrypt_HMAC.c
+++ b/src/EverCrypt_HMAC.c
@@ -79,23 +79,23 @@ EverCrypt_HMAC_compute_sha1(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)20U;
+    ite = 20U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -105,42 +105,37 @@ EverCrypt_HMAC_compute_sha1(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
-  uint32_t
-  s[5U] =
-    {
-      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-      (uint32_t)0xc3d2e1f0U
-    };
+  uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)0U, ipad, (uint32_t)64U);
+    Hacl_Hash_SHA1_legacy_update_last(s, 0ULL, ipad, 64U);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -152,25 +147,21 @@ EverCrypt_HMAC_compute_sha1(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_Hash_SHA1_legacy_update_multi(s, ipad, (uint32_t)1U);
+    Hacl_Hash_SHA1_legacy_update_multi(s, ipad, 1U);
     Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-    Hacl_Hash_SHA1_legacy_update_last(s,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-      rem,
-      rem_len);
+    Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
   }
   Hacl_Hash_Core_SHA1_legacy_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_Hash_Core_SHA1_legacy_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)20U / block_len;
-  uint32_t rem0 = (uint32_t)20U % block_len;
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 20U / block_len;
+  uint32_t rem0 = 20U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)20U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 20U - n_blocks_ * block_len });
   }
   else
   {
@@ -181,12 +172,9 @@ EverCrypt_HMAC_compute_sha1(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_Hash_SHA1_legacy_update_multi(s, opad, (uint32_t)1U);
+  Hacl_Hash_SHA1_legacy_update_multi(s, opad, 1U);
   Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-  Hacl_Hash_SHA1_legacy_update_last(s,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-    rem,
-    rem_len);
+  Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
   Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
 }
 
@@ -199,23 +187,23 @@ EverCrypt_HMAC_compute_sha2_256(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -225,48 +213,45 @@ EverCrypt_HMAC_compute_sha2_256(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
     uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
     os[i] = x;);
   uint32_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)0U + (uint64_t)(uint32_t)64U,
-      (uint32_t)64U,
-      ipad,
-      s);
+    Hacl_SHA2_Scalar32_sha256_update_last(0ULL + (uint64_t)64U, 64U, ipad, s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -278,9 +263,9 @@ EverCrypt_HMAC_compute_sha2_256(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    EverCrypt_Hash_update_multi_256(s, ipad, (uint32_t)1U);
+    EverCrypt_Hash_update_multi_256(s, ipad, 1U);
     EverCrypt_Hash_update_multi_256(s, full_blocks, n_blocks);
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
+    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)64U
       + (uint64_t)full_blocks_len
       + (uint64_t)rem_len,
       rem_len,
@@ -290,15 +275,14 @@ EverCrypt_HMAC_compute_sha2_256(
   Hacl_SHA2_Scalar32_sha256_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_SHA2_Scalar32_sha256_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -309,9 +293,9 @@ EverCrypt_HMAC_compute_sha2_256(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  EverCrypt_Hash_update_multi_256(s, opad, (uint32_t)1U);
+  EverCrypt_Hash_update_multi_256(s, opad, 1U);
   EverCrypt_Hash_update_multi_256(s, full_blocks, n_blocks);
-  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
+  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)64U
     + (uint64_t)full_blocks_len
     + (uint64_t)rem_len,
     rem_len,
@@ -329,23 +313,23 @@ EverCrypt_HMAC_compute_sha2_384(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)48U;
+    ite = 48U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -355,49 +339,49 @@ EverCrypt_HMAC_compute_sha2_384(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
     uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -409,9 +393,9 @@ EverCrypt_HMAC_compute_sha2_384(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_SHA2_Scalar32_sha384_update_nblocks(128U, ipad, s);
+    Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
@@ -421,15 +405,14 @@ EverCrypt_HMAC_compute_sha2_384(
   Hacl_SHA2_Scalar32_sha384_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_SHA2_Scalar32_sha384_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)48U / block_len;
-  uint32_t rem0 = (uint32_t)48U % block_len;
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 48U / block_len;
+  uint32_t rem0 = 48U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)48U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 48U - n_blocks_ * block_len });
   }
   else
   {
@@ -440,9 +423,9 @@ EverCrypt_HMAC_compute_sha2_384(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_SHA2_Scalar32_sha384_update_nblocks(128U, opad, s);
+  Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
@@ -460,23 +443,23 @@ EverCrypt_HMAC_compute_sha2_512(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -486,49 +469,49 @@ EverCrypt_HMAC_compute_sha2_512(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
     uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -540,9 +523,9 @@ EverCrypt_HMAC_compute_sha2_512(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(128U, ipad, s);
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
@@ -552,15 +535,14 @@ EverCrypt_HMAC_compute_sha2_512(
   Hacl_SHA2_Scalar32_sha512_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_SHA2_Scalar32_sha512_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -571,9 +553,9 @@ EverCrypt_HMAC_compute_sha2_512(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_SHA2_Scalar32_sha512_update_nblocks(128U, opad, s);
+  Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
@@ -591,66 +573,66 @@ EverCrypt_HMAC_compute_blake2s(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2s_32_blake2s((uint32_t)32U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Blake2s_32_blake2s(32U, nkey, key_len, key, 0U, NULL);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t s[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_init(s, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_32_blake2s_init(s, 0U, 32U);
   uint32_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last((uint32_t)64U, wv, s0, (uint64_t)0U, (uint32_t)64U, ipad);
+    Hacl_Blake2s_32_blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -663,9 +645,9 @@ EverCrypt_HMAC_compute_blake2s(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, ipad, (uint32_t)1U);
+    Hacl_Blake2s_32_blake2s_update_multi(64U, wv, s0, 0ULL, ipad, 1U);
     uint32_t wv0[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+    Hacl_Blake2s_32_blake2s_update_multi(n_blocks * 64U,
       wv0,
       s0,
       (uint64_t)block_len,
@@ -675,22 +657,21 @@ EverCrypt_HMAC_compute_blake2s(
     Hacl_Blake2s_32_blake2s_update_last(rem_len,
       wv1,
       s0,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+      (uint64_t)64U + (uint64_t)full_blocks_len,
       rem_len,
       rem);
   }
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst1, s0);
+  Hacl_Blake2s_32_blake2s_finish(32U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2s_32_blake2s_init(s0, (uint32_t)0U, (uint32_t)32U);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Blake2s_32_blake2s_init(s0, 0U, 32U);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -702,9 +683,9 @@ EverCrypt_HMAC_compute_blake2s(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint32_t wv[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, opad, (uint32_t)1U);
+  Hacl_Blake2s_32_blake2s_update_multi(64U, wv, s0, 0ULL, opad, 1U);
   uint32_t wv0[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+  Hacl_Blake2s_32_blake2s_update_multi(n_blocks * 64U,
     wv0,
     s0,
     (uint64_t)block_len,
@@ -714,10 +695,10 @@ EverCrypt_HMAC_compute_blake2s(
   Hacl_Blake2s_32_blake2s_update_last(rem_len,
     wv1,
     s0,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+    (uint64_t)64U + (uint64_t)full_blocks_len,
     rem_len,
     rem);
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, s0);
+  Hacl_Blake2s_32_blake2s_finish(32U, dst, s0);
 }
 
 void
@@ -729,71 +710,71 @@ EverCrypt_HMAC_compute_blake2b(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2b_32_blake2b((uint32_t)64U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Blake2b_32_blake2b(64U, nkey, key_len, key, 0U, NULL);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t s[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_init(s, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_32_blake2b_init(s, 0U, 64U);
   uint64_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last((uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_last(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      (uint32_t)128U,
+      FStar_UInt128_uint64_to_uint128(0ULL),
+      128U,
       ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -806,14 +787,14 @@ EverCrypt_HMAC_compute_blake2b(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_multi(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+      FStar_UInt128_uint64_to_uint128(0ULL),
       ipad,
-      (uint32_t)1U);
+      1U);
     uint64_t wv0[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_multi(n_blocks * 128U,
       wv0,
       s0,
       FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
@@ -823,23 +804,22 @@ EverCrypt_HMAC_compute_blake2b(
     Hacl_Blake2b_32_blake2b_update_last(rem_len,
       wv1,
       s0,
-      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       rem_len,
       rem);
   }
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst1, s0);
+  Hacl_Blake2b_32_blake2b_finish(64U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2b_32_blake2b_init(s0, (uint32_t)0U, (uint32_t)64U);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Blake2b_32_blake2b_init(s0, 0U, 64U);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -851,14 +831,14 @@ EverCrypt_HMAC_compute_blake2b(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint64_t wv[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
+  Hacl_Blake2b_32_blake2b_update_multi(128U,
     wv,
     s0,
-    FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+    FStar_UInt128_uint64_to_uint128(0ULL),
     opad,
-    (uint32_t)1U);
+    1U);
   uint64_t wv0[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+  Hacl_Blake2b_32_blake2b_update_multi(n_blocks * 128U,
     wv0,
     s0,
     FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
@@ -868,11 +848,11 @@ EverCrypt_HMAC_compute_blake2b(
   Hacl_Blake2b_32_blake2b_update_last(rem_len,
     wv1,
     s0,
-    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
       FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
     rem_len,
     rem);
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, s0);
+  Hacl_Blake2b_32_blake2b_finish(64U, dst, s0);
 }
 
 void
diff --git a/src/EverCrypt_Hash.c b/src/EverCrypt_Hash.c
index b88df9e2..ea3a1dea 100644
--- a/src/EverCrypt_Hash.c
+++ b/src/EverCrypt_Hash.c
@@ -146,61 +146,61 @@ static EverCrypt_Hash_state_s *create_in(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = MD5_s, { .case_MD5_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA1_s, { .case_SHA1_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_224_s, { .case_SHA2_224_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_256_s, { .case_SHA2_256_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_384_s, { .case_SHA2_384_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_512_s, { .case_SHA2_512_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_224_s, { .case_SHA3_224_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_256_s, { .case_SHA3_256_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_384_s, { .case_SHA3_384_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_512_s, { .case_SHA3_512_s = buf } });
         break;
       }
@@ -220,11 +220,11 @@ static EverCrypt_Hash_state_s *create_in(Spec_Hash_Definitions_hash_alg a)
         }
         else
         {
-          uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
+          uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
           s = ((EverCrypt_Hash_state_s){ .tag = Blake2S_s, { .case_Blake2S_s = buf } });
         }
         #else
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = Blake2S_s, { .case_Blake2S_s = buf } });
         #endif
         break;
@@ -245,11 +245,11 @@ static EverCrypt_Hash_state_s *create_in(Spec_Hash_Definitions_hash_alg a)
         }
         else
         {
-          uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
+          uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
           s = ((EverCrypt_Hash_state_s){ .tag = Blake2B_s, { .case_Blake2B_s = buf } });
         }
         #else
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = Blake2B_s, { .case_Blake2B_s = buf } });
         #endif
         break;
@@ -308,58 +308,58 @@ static void init(EverCrypt_Hash_state_s *s)
   if (scrut.tag == SHA3_224_s)
   {
     uint64_t *p1 = scrut.case_SHA3_224_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == SHA3_256_s)
   {
     uint64_t *p1 = scrut.case_SHA3_256_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == SHA3_384_s)
   {
     uint64_t *p1 = scrut.case_SHA3_384_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == SHA3_512_s)
   {
     uint64_t *p1 = scrut.case_SHA3_512_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == Blake2S_s)
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
-    Hacl_Blake2s_32_blake2s_init(p1, (uint32_t)0U, (uint32_t)32U);
+    Hacl_Blake2s_32_blake2s_init(p1, 0U, 32U);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
   {
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
-    Hacl_Blake2s_128_blake2s_init(p1, (uint32_t)0U, (uint32_t)32U);
+    Hacl_Blake2s_128_blake2s_init(p1, 0U, 32U);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
   if (scrut.tag == Blake2B_s)
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
-    Hacl_Blake2b_32_blake2b_init(p1, (uint32_t)0U, (uint32_t)64U);
+    Hacl_Blake2b_32_blake2b_init(p1, 0U, 64U);
     return;
   }
   if (scrut.tag == Blake2B_256_s)
   {
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
-    Hacl_Blake2b_256_blake2b_init(p1, (uint32_t)0U, (uint32_t)64U);
+    Hacl_Blake2b_256_blake2b_init(p1, 0U, 64U);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -373,22 +373,16 @@ static void init(EverCrypt_Hash_state_s *s)
 static uint32_t
 k224_256[64U] =
   {
-    (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U,
-    (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U,
-    (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U,
-    (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U,
-    (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU,
-    (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU,
-    (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U,
-    (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U,
-    (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U,
-    (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U,
-    (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U,
-    (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U,
-    (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U,
-    (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U,
-    (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U,
-    (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U
+    0x428a2f98U, 0x71374491U, 0xb5c0fbcfU, 0xe9b5dba5U, 0x3956c25bU, 0x59f111f1U, 0x923f82a4U,
+    0xab1c5ed5U, 0xd807aa98U, 0x12835b01U, 0x243185beU, 0x550c7dc3U, 0x72be5d74U, 0x80deb1feU,
+    0x9bdc06a7U, 0xc19bf174U, 0xe49b69c1U, 0xefbe4786U, 0x0fc19dc6U, 0x240ca1ccU, 0x2de92c6fU,
+    0x4a7484aaU, 0x5cb0a9dcU, 0x76f988daU, 0x983e5152U, 0xa831c66dU, 0xb00327c8U, 0xbf597fc7U,
+    0xc6e00bf3U, 0xd5a79147U, 0x06ca6351U, 0x14292967U, 0x27b70a85U, 0x2e1b2138U, 0x4d2c6dfcU,
+    0x53380d13U, 0x650a7354U, 0x766a0abbU, 0x81c2c92eU, 0x92722c85U, 0xa2bfe8a1U, 0xa81a664bU,
+    0xc24b8b70U, 0xc76c51a3U, 0xd192e819U, 0xd6990624U, 0xf40e3585U, 0x106aa070U, 0x19a4c116U,
+    0x1e376c08U, 0x2748774cU, 0x34b0bcb5U, 0x391c0cb3U, 0x4ed8aa4aU, 0x5b9cca4fU, 0x682e6ff3U,
+    0x748f82eeU, 0x78a5636fU, 0x84c87814U, 0x8cc70208U, 0x90befffaU, 0xa4506cebU, 0xbef9a3f7U,
+    0xc67178f2U
   };
 
 void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n)
@@ -399,13 +393,13 @@ void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n)
   if (has_shaext && has_sse)
   {
     uint64_t n1 = (uint64_t)n;
-    KRML_HOST_IGNORE(sha256_update(s, blocks, n1, k224_256));
+    sha256_update(s, blocks, n1, k224_256);
     return;
   }
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(n * (uint32_t)64U, blocks, s);
+  Hacl_SHA2_Scalar32_sha256_update_nblocks(n * 64U, blocks, s);
   #else
   KRML_HOST_IGNORE(k224_256);
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(n * (uint32_t)64U, blocks, s);
+  Hacl_SHA2_Scalar32_sha256_update_nblocks(n * 64U, blocks, s);
   #endif
 }
 
@@ -416,100 +410,100 @@ update_multi(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *blocks, uint3
   if (scrut.tag == MD5_s)
   {
     uint32_t *p1 = scrut.case_MD5_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     Hacl_Hash_MD5_legacy_update_multi(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA1_s)
   {
     uint32_t *p1 = scrut.case_SHA1_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     Hacl_Hash_SHA1_legacy_update_multi(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA2_224_s)
   {
     uint32_t *p1 = scrut.case_SHA2_224_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     EverCrypt_Hash_update_multi_256(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA2_256_s)
   {
     uint32_t *p1 = scrut.case_SHA2_256_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     EverCrypt_Hash_update_multi_256(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA2_384_s)
   {
     uint64_t *p1 = scrut.case_SHA2_384_s;
-    uint32_t n = len / (uint32_t)128U;
-    Hacl_SHA2_Scalar32_sha384_update_nblocks(n * (uint32_t)128U, blocks, p1);
+    uint32_t n = len / 128U;
+    Hacl_SHA2_Scalar32_sha384_update_nblocks(n * 128U, blocks, p1);
     return;
   }
   if (scrut.tag == SHA2_512_s)
   {
     uint64_t *p1 = scrut.case_SHA2_512_s;
-    uint32_t n = len / (uint32_t)128U;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(n * (uint32_t)128U, blocks, p1);
+    uint32_t n = len / 128U;
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(n * 128U, blocks, p1);
     return;
   }
   if (scrut.tag == SHA3_224_s)
   {
     uint64_t *p1 = scrut.case_SHA3_224_s;
-    uint32_t n = len / (uint32_t)144U;
+    uint32_t n = len / 144U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_224, p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA3_256_s)
   {
     uint64_t *p1 = scrut.case_SHA3_256_s;
-    uint32_t n = len / (uint32_t)136U;
+    uint32_t n = len / 136U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_256, p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA3_384_s)
   {
     uint64_t *p1 = scrut.case_SHA3_384_s;
-    uint32_t n = len / (uint32_t)104U;
+    uint32_t n = len / 104U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_384, p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA3_512_s)
   {
     uint64_t *p1 = scrut.case_SHA3_512_s;
-    uint32_t n = len / (uint32_t)72U;
+    uint32_t n = len / 72U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_512, p1, blocks, n);
     return;
   }
   if (scrut.tag == Blake2S_s)
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi(n * (uint32_t)64U, wv, p1, prevlen, blocks, n);
+    Hacl_Blake2s_32_blake2s_update_multi(n * 64U, wv, p1, prevlen, blocks, n);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
   {
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_multi(n * (uint32_t)64U, wv, p1, prevlen, blocks, n);
+    Hacl_Blake2s_128_blake2s_update_multi(n * 64U, wv, p1, prevlen, blocks, n);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
   if (scrut.tag == Blake2B_s)
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
-    uint32_t n = len / (uint32_t)128U;
+    uint32_t n = len / 128U;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi(n * (uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_multi(n * 128U,
       wv,
       p1,
       FStar_UInt128_uint64_to_uint128(prevlen),
@@ -521,9 +515,9 @@ update_multi(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *blocks, uint3
   {
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
-    uint32_t n = len / (uint32_t)128U;
+    uint32_t n = len / 128U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_multi(n * (uint32_t)128U,
+    Hacl_Blake2b_256_blake2b_update_multi(n * 128U,
       wv,
       p1,
       FStar_UInt128_uint64_to_uint128(prevlen),
@@ -531,7 +525,7 @@ update_multi(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *blocks, uint3
       n);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -629,7 +623,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
     Hacl_Blake2s_128_blake2s_update_last(last_len, wv, p1, prev_len, last_len, last);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -658,7 +652,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
       last);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -711,58 +705,58 @@ static void finish(EverCrypt_Hash_state_s *s, uint8_t *dst)
   if (scrut.tag == SHA3_224_s)
   {
     uint64_t *p1 = scrut.case_SHA3_224_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)144U, (uint32_t)28U, dst);
+    Hacl_Impl_SHA3_squeeze(p1, 144U, 28U, dst);
     return;
   }
   if (scrut.tag == SHA3_256_s)
   {
     uint64_t *p1 = scrut.case_SHA3_256_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)136U, (uint32_t)32U, dst);
+    Hacl_Impl_SHA3_squeeze(p1, 136U, 32U, dst);
     return;
   }
   if (scrut.tag == SHA3_384_s)
   {
     uint64_t *p1 = scrut.case_SHA3_384_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)104U, (uint32_t)48U, dst);
+    Hacl_Impl_SHA3_squeeze(p1, 104U, 48U, dst);
     return;
   }
   if (scrut.tag == SHA3_512_s)
   {
     uint64_t *p1 = scrut.case_SHA3_512_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)72U, (uint32_t)64U, dst);
+    Hacl_Impl_SHA3_squeeze(p1, 72U, 64U, dst);
     return;
   }
   if (scrut.tag == Blake2S_s)
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
-    Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, p1);
+    Hacl_Blake2s_32_blake2s_finish(32U, dst, p1);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
   {
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
-    Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, p1);
+    Hacl_Blake2s_128_blake2s_finish(32U, dst, p1);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
   if (scrut.tag == Blake2B_s)
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
-    Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, p1);
+    Hacl_Blake2b_32_blake2b_finish(64U, dst, p1);
     return;
   }
   if (scrut.tag == Blake2B_256_s)
   {
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
-    Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, p1);
+    Hacl_Blake2b_256_blake2b_finish(64U, dst, p1);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -873,7 +867,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)4U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 4U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA1_s)
@@ -889,7 +883,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)5U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 5U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA2_224_s)
@@ -905,7 +899,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA2_256_s)
@@ -921,7 +915,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA2_384_s)
@@ -937,7 +931,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA2_512_s)
@@ -953,7 +947,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_224_s)
@@ -969,7 +963,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_256_s)
@@ -985,7 +979,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_384_s)
@@ -1001,7 +995,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_512_s)
@@ -1017,7 +1011,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == Blake2S_s)
@@ -1027,7 +1021,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2S_s)
     {
       uint32_t *p_dst = scrut.case_Blake2S_s;
-      memcpy(p_dst, p_src, (uint32_t)16U * sizeof (uint32_t));
+      memcpy(p_dst, p_src, 16U * sizeof (uint32_t));
       return;
     }
     if (scrut.tag == Blake2S_128_s)
@@ -1037,7 +1031,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
       Hacl_Blake2s_128_load_state128s_from_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1054,7 +1048,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2B_s)
     {
       uint64_t *p_dst = scrut.case_Blake2B_s;
-      memcpy(p_dst, p_src, (uint32_t)16U * sizeof (uint64_t));
+      memcpy(p_dst, p_src, 16U * sizeof (uint64_t));
       return;
     }
     if (scrut.tag == Blake2B_256_s)
@@ -1064,7 +1058,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
       Hacl_Blake2b_256_load_state256b_from_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1081,7 +1075,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2S_128_s)
     {
       Lib_IntVector_Intrinsics_vec128 *p_dst = scrut.case_Blake2S_128_s;
-      memcpy(p_dst, p_src, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+      memcpy(p_dst, p_src, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
       return;
     }
     if (scrut.tag == Blake2S_s)
@@ -1091,7 +1085,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
       Hacl_Blake2s_128_store_state128s_to_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1108,7 +1102,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2B_256_s)
     {
       Lib_IntVector_Intrinsics_vec256 *p_dst = scrut.case_Blake2B_256_s;
-      memcpy(p_dst, p_src, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+      memcpy(p_dst, p_src, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
       return;
     }
     if (scrut.tag == Blake2B_s)
@@ -1118,7 +1112,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
       Hacl_Blake2b_256_store_state256b_to_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1201,59 +1195,59 @@ static uint32_t block_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)144U;
+        return 144U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)104U;
+        return 104U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)72U;
+        return 72U;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        return (uint32_t)168U;
+        return 168U;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     default:
       {
@@ -1276,7 +1270,7 @@ EverCrypt_Hash_Incremental_hash_state
   uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(block_len(a), sizeof (uint8_t));
   EverCrypt_Hash_state_s *block_state = create_in(a);
   EverCrypt_Hash_Incremental_hash_state
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   EverCrypt_Hash_Incremental_hash_state
   *p =
     (EverCrypt_Hash_Incremental_hash_state *)KRML_HOST_MALLOC(sizeof (
@@ -1296,10 +1290,10 @@ void EverCrypt_Hash_Incremental_init(EverCrypt_Hash_Incremental_hash_state *s)
   uint8_t *buf = scrut.buf;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   Spec_Hash_Definitions_hash_alg i = alg_of_state(block_state);
-  KRML_HOST_IGNORE(i);
+  KRML_MAYBE_UNUSED_VAR(i);
   init(block_state);
   EverCrypt_Hash_Incremental_hash_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -1326,72 +1320,72 @@ EverCrypt_Hash_Incremental_update(
   {
     case Spec_Hash_Definitions_MD5:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     default:
@@ -1408,7 +1402,7 @@ EverCrypt_Hash_Incremental_update(
   else
   {
     uint32_t sz;
-    if (total_len % (uint64_t)block_len(i1) == (uint64_t)0U && total_len > (uint64_t)0U)
+    if (total_len % (uint64_t)block_len(i1) == 0ULL && total_len > 0ULL)
     {
       sz = block_len(i1);
     }
@@ -1423,7 +1417,7 @@ EverCrypt_Hash_Incremental_update(
       uint8_t *buf = s2.buf;
       uint64_t total_len1 = s2.total_len;
       uint32_t sz1;
-      if (total_len1 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+      if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL)
       {
         sz1 = block_len(i1);
       }
@@ -1444,14 +1438,14 @@ EverCrypt_Hash_Incremental_update(
           }
         );
     }
-    else if (sz == (uint32_t)0U)
+    else if (sz == 0U)
     {
       EverCrypt_Hash_Incremental_hash_state s2 = *s;
       EverCrypt_Hash_state_s *block_state1 = s2.block_state;
       uint8_t *buf = s2.buf;
       uint64_t total_len1 = s2.total_len;
       uint32_t sz1;
-      if (total_len1 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+      if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL)
       {
         sz1 = block_len(i1);
       }
@@ -1459,13 +1453,13 @@ EverCrypt_Hash_Incremental_update(
       {
         sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i1));
       }
-      if (!(sz1 == (uint32_t)0U))
+      if (!(sz1 == 0U))
       {
         uint64_t prevlen = total_len1 - (uint64_t)sz1;
         update_multi(block_state1, prevlen, buf, block_len(i1));
       }
       uint32_t ite0;
-      if ((uint64_t)len % (uint64_t)block_len(i1) == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+      if ((uint64_t)len % (uint64_t)block_len(i1) == 0ULL && (uint64_t)len > 0ULL)
       {
         ite0 = block_len(i1);
       }
@@ -1501,7 +1495,7 @@ EverCrypt_Hash_Incremental_update(
       uint8_t *buf0 = s2.buf;
       uint64_t total_len10 = s2.total_len;
       uint32_t sz10;
-      if (total_len10 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len10 > (uint64_t)0U)
+      if (total_len10 % (uint64_t)block_len(i1) == 0ULL && total_len10 > 0ULL)
       {
         sz10 = block_len(i1);
       }
@@ -1526,7 +1520,7 @@ EverCrypt_Hash_Incremental_update(
       uint8_t *buf = s20.buf;
       uint64_t total_len1 = s20.total_len;
       uint32_t sz1;
-      if (total_len1 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+      if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL)
       {
         sz1 = block_len(i1);
       }
@@ -1534,19 +1528,13 @@ EverCrypt_Hash_Incremental_update(
       {
         sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i1));
       }
-      if (!(sz1 == (uint32_t)0U))
+      if (!(sz1 == 0U))
       {
         uint64_t prevlen = total_len1 - (uint64_t)sz1;
         update_multi(block_state1, prevlen, buf, block_len(i1));
       }
       uint32_t ite0;
-      if
-      (
-        (uint64_t)(len - diff)
-        % (uint64_t)block_len(i1)
-        == (uint64_t)0U
-        && (uint64_t)(len - diff) > (uint64_t)0U
-      )
+      if ((uint64_t)(len - diff) % (uint64_t)block_len(i1) == 0ULL && (uint64_t)(len - diff) > 0ULL)
       {
         ite0 = block_len(i1);
       }
@@ -1599,13 +1587,7 @@ static void finish_md5(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_MD5)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_MD5) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_MD5);
   }
@@ -1620,7 +1602,7 @@ static void finish_md5(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_MD5) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_MD5) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_MD5);
   }
@@ -1630,7 +1612,7 @@ static void finish_md5(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1643,13 +1625,7 @@ static void finish_sha1(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA1)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA1) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA1);
   }
@@ -1664,7 +1640,7 @@ static void finish_sha1(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA1) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA1) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA1);
   }
@@ -1674,7 +1650,7 @@ static void finish_sha1(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1688,12 +1664,7 @@ static void finish_sha224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_224)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_224) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_224);
   }
@@ -1708,7 +1679,7 @@ static void finish_sha224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_224) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_224) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_224);
   }
@@ -1718,7 +1689,7 @@ static void finish_sha224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1732,12 +1703,7 @@ static void finish_sha256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_256)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_256) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_256);
   }
@@ -1752,7 +1718,7 @@ static void finish_sha256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_256) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_256) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_256);
   }
@@ -1762,7 +1728,7 @@ static void finish_sha256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1776,12 +1742,7 @@ static void finish_sha3_224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_224)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_224) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_224);
   }
@@ -1796,7 +1757,7 @@ static void finish_sha3_224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_224) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_224) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_224);
   }
@@ -1806,7 +1767,7 @@ static void finish_sha3_224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1820,12 +1781,7 @@ static void finish_sha3_256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_256)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_256) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_256);
   }
@@ -1840,7 +1796,7 @@ static void finish_sha3_256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_256) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_256) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_256);
   }
@@ -1850,7 +1806,7 @@ static void finish_sha3_256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1864,12 +1820,7 @@ static void finish_sha3_384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_384)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_384) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_384);
   }
@@ -1884,7 +1835,7 @@ static void finish_sha3_384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_384) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_384) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_384);
   }
@@ -1894,7 +1845,7 @@ static void finish_sha3_384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1908,12 +1859,7 @@ static void finish_sha3_512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_512)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_512) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_512);
   }
@@ -1928,7 +1874,7 @@ static void finish_sha3_512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_512) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_512) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_512);
   }
@@ -1938,7 +1884,7 @@ static void finish_sha3_512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1952,12 +1898,7 @@ static void finish_sha384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_384)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_384) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_384);
   }
@@ -1972,7 +1913,7 @@ static void finish_sha384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_384) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_384) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_384);
   }
@@ -1982,7 +1923,7 @@ static void finish_sha384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1996,12 +1937,7 @@ static void finish_sha512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_512)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_512) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_512);
   }
@@ -2016,7 +1952,7 @@ static void finish_sha512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_512) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_512) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_512);
   }
@@ -2026,7 +1962,7 @@ static void finish_sha512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -2039,13 +1975,7 @@ static void finish_blake2s(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_Blake2S)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_Blake2S) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_Blake2S);
   }
@@ -2075,7 +2005,7 @@ static void finish_blake2s(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_Blake2S) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_Blake2S) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_Blake2S);
   }
@@ -2085,7 +2015,7 @@ static void finish_blake2s(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -2098,13 +2028,7 @@ static void finish_blake2b(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_Blake2B)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_Blake2B) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_Blake2B);
   }
@@ -2134,7 +2058,7 @@ static void finish_blake2b(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_Blake2B) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_Blake2B) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_Blake2B);
   }
@@ -2144,7 +2068,7 @@ static void finish_blake2b(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -2258,24 +2182,24 @@ void EverCrypt_Hash_Incremental_hash_256(uint8_t *input, uint32_t input_len, uin
 {
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
     uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
     os[i] = x;);
   uint32_t *s = st;
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -2296,24 +2220,24 @@ static void hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst)
 {
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
     uint32_t x = Hacl_Impl_SHA2_Generic_h224[i];
     os[i] = x;);
   uint32_t *s = st;
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -2403,12 +2327,12 @@ EverCrypt_Hash_Incremental_hash(
         bool vec128 = EverCrypt_AutoConfig2_has_vec128();
         if (vec128)
         {
-          Hacl_Blake2s_128_blake2s((uint32_t)32U, dst, len, input, (uint32_t)0U, NULL);
+          Hacl_Blake2s_128_blake2s(32U, dst, len, input, 0U, NULL);
           return;
         }
-        Hacl_Blake2s_32_blake2s((uint32_t)32U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Blake2s_32_blake2s(32U, dst, len, input, 0U, NULL);
         #else
-        Hacl_Blake2s_32_blake2s((uint32_t)32U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Blake2s_32_blake2s(32U, dst, len, input, 0U, NULL);
         #endif
         break;
       }
@@ -2418,12 +2342,12 @@ EverCrypt_Hash_Incremental_hash(
         bool vec256 = EverCrypt_AutoConfig2_has_vec256();
         if (vec256)
         {
-          Hacl_Blake2b_256_blake2b((uint32_t)64U, dst, len, input, (uint32_t)0U, NULL);
+          Hacl_Blake2b_256_blake2b(64U, dst, len, input, 0U, NULL);
           return;
         }
-        Hacl_Blake2b_32_blake2b((uint32_t)64U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Blake2b_32_blake2b(64U, dst, len, input, 0U, NULL);
         #else
-        Hacl_Blake2b_32_blake2b((uint32_t)64U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Blake2b_32_blake2b(64U, dst, len, input, 0U, NULL);
         #endif
         break;
       }
diff --git a/src/EverCrypt_Poly1305.c b/src/EverCrypt_Poly1305.c
index 454c0fce..f9e1e063 100644
--- a/src/EverCrypt_Poly1305.c
+++ b/src/EverCrypt_Poly1305.c
@@ -31,30 +31,30 @@
 KRML_MAYBE_UNUSED static void
 poly1305_vale(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key)
 {
-  KRML_HOST_IGNORE(dst);
-  KRML_HOST_IGNORE(src);
-  KRML_HOST_IGNORE(len);
-  KRML_HOST_IGNORE(key);
+  KRML_MAYBE_UNUSED_VAR(dst);
+  KRML_MAYBE_UNUSED_VAR(src);
+  KRML_MAYBE_UNUSED_VAR(len);
+  KRML_MAYBE_UNUSED_VAR(key);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ctx[192U] = { 0U };
-  memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t));
-  uint32_t n_blocks = len / (uint32_t)16U;
-  uint32_t n_extra = len % (uint32_t)16U;
+  memcpy(ctx + 24U, key, 32U * sizeof (uint8_t));
+  uint32_t n_blocks = len / 16U;
+  uint32_t n_extra = len % 16U;
   uint8_t tmp[16U] = { 0U };
-  if (n_extra == (uint32_t)0U)
+  if (n_extra == 0U)
   {
-    KRML_HOST_IGNORE(x64_poly1305(ctx, src, (uint64_t)len, (uint64_t)1U));
+    x64_poly1305(ctx, src, (uint64_t)len, 1ULL);
   }
   else
   {
-    uint32_t len16 = n_blocks * (uint32_t)16U;
+    uint32_t len16 = n_blocks * 16U;
     uint8_t *src16 = src;
     memcpy(tmp, src + len16, n_extra * sizeof (uint8_t));
-    KRML_HOST_IGNORE(x64_poly1305(ctx, src16, (uint64_t)len16, (uint64_t)0U));
-    memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(x64_poly1305(ctx, tmp, (uint64_t)n_extra, (uint64_t)1U));
+    x64_poly1305(ctx, src16, (uint64_t)len16, 0ULL);
+    memcpy(ctx + 24U, key, 32U * sizeof (uint8_t));
+    x64_poly1305(ctx, tmp, (uint64_t)n_extra, 1ULL);
   }
-  memcpy(dst, ctx, (uint32_t)16U * sizeof (uint8_t));
+  memcpy(dst, ctx, 16U * sizeof (uint8_t));
   #endif
 }
 
@@ -65,7 +65,7 @@ void EverCrypt_Poly1305_poly1305(uint8_t *dst, uint8_t *src, uint32_t len, uint8
   #if HACL_CAN_COMPILE_VEC256
   if (vec256)
   {
-    KRML_HOST_IGNORE(vec128);
+    KRML_MAYBE_UNUSED_VAR(vec128);
     Hacl_Poly1305_256_poly1305_mac(dst, len, src, key);
     return;
   }
@@ -73,13 +73,13 @@ void EverCrypt_Poly1305_poly1305(uint8_t *dst, uint8_t *src, uint32_t len, uint8
   #if HACL_CAN_COMPILE_VEC128
   if (vec128)
   {
-    KRML_HOST_IGNORE(vec256);
+    KRML_MAYBE_UNUSED_VAR(vec256);
     Hacl_Poly1305_128_poly1305_mac(dst, len, src, key);
     return;
   }
   #endif
-  KRML_HOST_IGNORE(vec256);
-  KRML_HOST_IGNORE(vec128);
+  KRML_MAYBE_UNUSED_VAR(vec256);
+  KRML_MAYBE_UNUSED_VAR(vec128);
   #if HACL_CAN_COMPILE_VALE
   poly1305_vale(dst, src, len, key);
   #else
diff --git a/src/Hacl_Bignum.c b/src/Hacl_Bignum.c
index fe73faa6..568bcc26 100644
--- a/src/Hacl_Bignum.c
+++ b/src/Hacl_Bignum.c
@@ -37,12 +37,12 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_mul_u32(aLen, a, aLen, b, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint32_t *a0 = a;
   uint32_t *a1 = a + len2;
   uint32_t *b0 = b;
@@ -52,23 +52,23 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t *tmp_ = tmp + aLen;
   uint32_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a0, a1, tmp_);
   uint32_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint32_t *os = t0;
-    uint32_t x = (((uint32_t)0U - c0) & t0[i]) | (~((uint32_t)0U - c0) & tmp_[i]);
+    uint32_t x = ((0U - c0) & t0[i]) | (~(0U - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c10);
+  KRML_MAYBE_UNUSED_VAR(c10);
   uint32_t c00 = c0;
   uint32_t c010 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b0, b1, tmp_);
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b1, b0, t1);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint32_t *os = t1;
-    uint32_t x = (((uint32_t)0U - c010) & t1[i]) | (~((uint32_t)0U - c010) & tmp_[i]);
+    uint32_t x = ((0U - c010) & t1[i]) | (~(0U - c010) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint32_t c11 = c010;
   uint32_t *t23 = tmp + aLen;
   uint32_t *tmp1 = tmp + aLen + aLen;
@@ -81,66 +81,61 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t *r231 = res + aLen;
   uint32_t *t01 = tmp;
   uint32_t *t231 = tmp + aLen;
-  uint32_t *t45 = tmp + (uint32_t)2U * aLen;
-  uint32_t *t67 = tmp + (uint32_t)3U * aLen;
+  uint32_t *t45 = tmp + 2U * aLen;
+  uint32_t *t67 = tmp + 3U * aLen;
   uint32_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r011, r231, t01);
   uint32_t c_sign = c00 ^ c11;
   uint32_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t67);
   uint32_t c31 = c2 - c3;
   uint32_t c4 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, t01, t231, t45);
   uint32_t c41 = c2 + c4;
-  uint32_t mask = (uint32_t)0U - c_sign;
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  uint32_t mask = 0U - c_sign;
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     uint32_t *os = t45;
     uint32_t x = (mask & t45[i]) | (~mask & t67[i]);
     os[i] = x;
   }
   uint32_t c5 = (mask & c41) | (~mask & c31);
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint32_t *r0 = res + aLen2;
   uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0);
   uint32_t c6 = r10;
   uint32_t c60 = c6;
   uint32_t c7 = c5 + c60;
   uint32_t *r = res + aLen + aLen2;
-  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32((uint32_t)0U, r[0U], c7, r);
+  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32(0U, r[0U], c7, r);
   uint32_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint32_t *a11 = r + (uint32_t)1U;
-    uint32_t *res1 = r + (uint32_t)1U;
+    uint32_t *a11 = r + 1U;
+    uint32_t *res1 = r + 1U;
     uint32_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint32_t t11 = a11[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i0);
-      uint32_t t110 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t110, (uint32_t)0U, res_i1);
-      uint32_t t111 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t111, (uint32_t)0U, res_i2);
-      uint32_t t112 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t112, (uint32_t)0U, res_i);
+      uint32_t t11 = a11[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i0);
+      uint32_t t110 = a11[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t110, 0U, res_i1);
+      uint32_t t111 = a11[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t111, 0U, res_i2);
+      uint32_t t112 = a11[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t112, 0U, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint32_t t11 = a11[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i);
     }
     uint32_t c110 = c;
     r1 = c110;
@@ -152,7 +147,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t c8 = r1;
   uint32_t c = c8;
   uint32_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -164,12 +159,12 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_mul_u64(aLen, a, aLen, b, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint64_t *a0 = a;
   uint64_t *a1 = a + len2;
   uint64_t *b0 = b;
@@ -179,23 +174,23 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t *tmp_ = tmp + aLen;
   uint64_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a0, a1, tmp_);
   uint64_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint64_t *os = t0;
-    uint64_t x = (((uint64_t)0U - c0) & t0[i]) | (~((uint64_t)0U - c0) & tmp_[i]);
+    uint64_t x = ((0ULL - c0) & t0[i]) | (~(0ULL - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c10);
+  KRML_MAYBE_UNUSED_VAR(c10);
   uint64_t c00 = c0;
   uint64_t c010 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b0, b1, tmp_);
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b1, b0, t1);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint64_t *os = t1;
-    uint64_t x = (((uint64_t)0U - c010) & t1[i]) | (~((uint64_t)0U - c010) & tmp_[i]);
+    uint64_t x = ((0ULL - c010) & t1[i]) | (~(0ULL - c010) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint64_t c11 = c010;
   uint64_t *t23 = tmp + aLen;
   uint64_t *tmp1 = tmp + aLen + aLen;
@@ -208,66 +203,61 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t *r231 = res + aLen;
   uint64_t *t01 = tmp;
   uint64_t *t231 = tmp + aLen;
-  uint64_t *t45 = tmp + (uint32_t)2U * aLen;
-  uint64_t *t67 = tmp + (uint32_t)3U * aLen;
+  uint64_t *t45 = tmp + 2U * aLen;
+  uint64_t *t67 = tmp + 3U * aLen;
   uint64_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r011, r231, t01);
   uint64_t c_sign = c00 ^ c11;
   uint64_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t67);
   uint64_t c31 = c2 - c3;
   uint64_t c4 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, t01, t231, t45);
   uint64_t c41 = c2 + c4;
-  uint64_t mask = (uint64_t)0U - c_sign;
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  uint64_t mask = 0ULL - c_sign;
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     uint64_t *os = t45;
     uint64_t x = (mask & t45[i]) | (~mask & t67[i]);
     os[i] = x;
   }
   uint64_t c5 = (mask & c41) | (~mask & c31);
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint64_t *r0 = res + aLen2;
   uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0);
   uint64_t c6 = r10;
   uint64_t c60 = c6;
   uint64_t c7 = c5 + c60;
   uint64_t *r = res + aLen + aLen2;
-  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, r[0U], c7, r);
+  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, r[0U], c7, r);
   uint64_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint64_t *a11 = r + (uint32_t)1U;
-    uint64_t *res1 = r + (uint32_t)1U;
+    uint64_t *a11 = r + 1U;
+    uint64_t *res1 = r + 1U;
     uint64_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint64_t t11 = a11[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i0);
-      uint64_t t110 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t110, (uint64_t)0U, res_i1);
-      uint64_t t111 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t111, (uint64_t)0U, res_i2);
-      uint64_t t112 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t112, (uint64_t)0U, res_i);
+      uint64_t t11 = a11[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i0);
+      uint64_t t110 = a11[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t110, 0ULL, res_i1);
+      uint64_t t111 = a11[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t111, 0ULL, res_i2);
+      uint64_t t112 = a11[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t112, 0ULL, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint64_t t11 = a11[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i);
     }
     uint64_t c110 = c;
     r1 = c110;
@@ -279,7 +269,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t c8 = r1;
   uint64_t c = c8;
   uint64_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -290,27 +280,27 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(
   uint32_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_sqr_u32(aLen, a, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint32_t *a0 = a;
   uint32_t *a1 = a + len2;
   uint32_t *t0 = tmp;
   uint32_t *tmp_ = tmp + aLen;
   uint32_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a0, a1, tmp_);
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint32_t *os = t0;
-    uint32_t x = (((uint32_t)0U - c0) & t0[i]) | (~((uint32_t)0U - c0) & tmp_[i]);
+    uint32_t x = ((0U - c0) & t0[i]) | (~(0U - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint32_t c00 = c0;
-  KRML_HOST_IGNORE(c00);
+  KRML_MAYBE_UNUSED_VAR(c00);
   uint32_t *t23 = tmp + aLen;
   uint32_t *tmp1 = tmp + aLen + aLen;
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len2, t0, tmp1, t23);
@@ -322,54 +312,49 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(
   uint32_t *r231 = res + aLen;
   uint32_t *t01 = tmp;
   uint32_t *t231 = tmp + aLen;
-  uint32_t *t45 = tmp + (uint32_t)2U * aLen;
+  uint32_t *t45 = tmp + 2U * aLen;
   uint32_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r011, r231, t01);
   uint32_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t45);
   uint32_t c5 = c2 - c3;
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint32_t *r0 = res + aLen2;
   uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0);
   uint32_t c4 = r10;
   uint32_t c6 = c4;
   uint32_t c7 = c5 + c6;
   uint32_t *r = res + aLen + aLen2;
-  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32((uint32_t)0U, r[0U], c7, r);
+  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32(0U, r[0U], c7, r);
   uint32_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint32_t *a11 = r + (uint32_t)1U;
-    uint32_t *res1 = r + (uint32_t)1U;
+    uint32_t *a11 = r + 1U;
+    uint32_t *res1 = r + 1U;
     uint32_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint32_t t1 = a11[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a11[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a11[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a11[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a11[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, 0U, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint32_t t1 = a11[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, 0U, res_i);
     }
     uint32_t c10 = c;
     r1 = c10;
@@ -381,7 +366,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(
   uint32_t c8 = r1;
   uint32_t c = c8;
   uint32_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -392,27 +377,27 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(
   uint64_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_sqr_u64(aLen, a, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint64_t *a0 = a;
   uint64_t *a1 = a + len2;
   uint64_t *t0 = tmp;
   uint64_t *tmp_ = tmp + aLen;
   uint64_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a0, a1, tmp_);
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint64_t *os = t0;
-    uint64_t x = (((uint64_t)0U - c0) & t0[i]) | (~((uint64_t)0U - c0) & tmp_[i]);
+    uint64_t x = ((0ULL - c0) & t0[i]) | (~(0ULL - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint64_t c00 = c0;
-  KRML_HOST_IGNORE(c00);
+  KRML_MAYBE_UNUSED_VAR(c00);
   uint64_t *t23 = tmp + aLen;
   uint64_t *tmp1 = tmp + aLen + aLen;
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len2, t0, tmp1, t23);
@@ -424,54 +409,49 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(
   uint64_t *r231 = res + aLen;
   uint64_t *t01 = tmp;
   uint64_t *t231 = tmp + aLen;
-  uint64_t *t45 = tmp + (uint32_t)2U * aLen;
+  uint64_t *t45 = tmp + 2U * aLen;
   uint64_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r011, r231, t01);
   uint64_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t45);
   uint64_t c5 = c2 - c3;
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint64_t *r0 = res + aLen2;
   uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0);
   uint64_t c4 = r10;
   uint64_t c6 = c4;
   uint64_t c7 = c5 + c6;
   uint64_t *r = res + aLen + aLen2;
-  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, r[0U], c7, r);
+  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, r[0U], c7, r);
   uint64_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint64_t *a11 = r + (uint32_t)1U;
-    uint64_t *res1 = r + (uint32_t)1U;
+    uint64_t *a11 = r + 1U;
+    uint64_t *res1 = r + 1U;
     uint64_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint64_t t1 = a11[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a11[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a11[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a11[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a11[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, 0ULL, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint64_t t1 = a11[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c10 = c;
     r1 = c10;
@@ -483,7 +463,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(
   uint64_t c8 = r1;
   uint64_t c = c8;
   uint64_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -495,27 +475,27 @@ Hacl_Bignum_bn_add_mod_n_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -526,27 +506,27 @@ Hacl_Bignum_bn_add_mod_n_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t tmp[len1];
   memset(tmp, 0U, len1 * sizeof (uint32_t));
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = res[i];
     uint32_t t2 = n[i];
@@ -555,7 +535,7 @@ Hacl_Bignum_bn_add_mod_n_u32(
   }
   uint32_t c1 = c;
   uint32_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -572,27 +552,27 @@ Hacl_Bignum_bn_add_mod_n_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -603,27 +583,27 @@ Hacl_Bignum_bn_add_mod_n_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t tmp[len1];
   memset(tmp, 0U, len1 * sizeof (uint64_t));
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = res[i];
     uint64_t t2 = n[i];
@@ -632,7 +612,7 @@ Hacl_Bignum_bn_add_mod_n_u64(
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -649,27 +629,27 @@ Hacl_Bignum_bn_sub_mod_n_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -680,27 +660,27 @@ Hacl_Bignum_bn_sub_mod_n_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t tmp[len1];
   memset(tmp, 0U, len1 * sizeof (uint32_t));
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = res[i];
     uint32_t t2 = n[i];
@@ -708,9 +688,9 @@ Hacl_Bignum_bn_sub_mod_n_u32(
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t2, res_i);
   }
   uint32_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint32_t c2 = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t c2 = 0U - c00;
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -727,27 +707,27 @@ Hacl_Bignum_bn_sub_mod_n_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -758,27 +738,27 @@ Hacl_Bignum_bn_sub_mod_n_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t tmp[len1];
   memset(tmp, 0U, len1 * sizeof (uint64_t));
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = res[i];
     uint64_t t2 = n[i];
@@ -786,9 +766,9 @@ Hacl_Bignum_bn_sub_mod_n_u64(
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -798,42 +778,42 @@ Hacl_Bignum_bn_sub_mod_n_u64(
 
 uint32_t Hacl_Bignum_ModInvLimb_mod_inv_uint32(uint32_t n0)
 {
-  uint32_t alpha = (uint32_t)2147483648U;
+  uint32_t alpha = 2147483648U;
   uint32_t beta = n0;
-  uint32_t ub = (uint32_t)0U;
-  uint32_t vb = (uint32_t)0U;
-  ub = (uint32_t)1U;
-  vb = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t ub = 0U;
+  uint32_t vb = 0U;
+  ub = 1U;
+  vb = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint32_t us = ub;
     uint32_t vs = vb;
-    uint32_t u_is_odd = (uint32_t)0U - (us & (uint32_t)1U);
+    uint32_t u_is_odd = 0U - (us & 1U);
     uint32_t beta_if_u_is_odd = beta & u_is_odd;
-    ub = ((us ^ beta_if_u_is_odd) >> (uint32_t)1U) + (us & beta_if_u_is_odd);
+    ub = ((us ^ beta_if_u_is_odd) >> 1U) + (us & beta_if_u_is_odd);
     uint32_t alpha_if_u_is_odd = alpha & u_is_odd;
-    vb = (vs >> (uint32_t)1U) + alpha_if_u_is_odd;
+    vb = (vs >> 1U) + alpha_if_u_is_odd;
   }
   return vb;
 }
 
 uint64_t Hacl_Bignum_ModInvLimb_mod_inv_uint64(uint64_t n0)
 {
-  uint64_t alpha = (uint64_t)9223372036854775808U;
+  uint64_t alpha = 9223372036854775808ULL;
   uint64_t beta = n0;
-  uint64_t ub = (uint64_t)0U;
-  uint64_t vb = (uint64_t)0U;
-  ub = (uint64_t)1U;
-  vb = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t ub = 0ULL;
+  uint64_t vb = 0ULL;
+  ub = 1ULL;
+  vb = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t us = ub;
     uint64_t vs = vb;
-    uint64_t u_is_odd = (uint64_t)0U - (us & (uint64_t)1U);
+    uint64_t u_is_odd = 0ULL - (us & 1ULL);
     uint64_t beta_if_u_is_odd = beta & u_is_odd;
-    ub = ((us ^ beta_if_u_is_odd) >> (uint32_t)1U) + (us & beta_if_u_is_odd);
+    ub = ((us ^ beta_if_u_is_odd) >> 1U) + (us & beta_if_u_is_odd);
     uint64_t alpha_if_u_is_odd = alpha & u_is_odd;
-    vb = (vs >> (uint32_t)1U) + alpha_if_u_is_odd;
+    vb = (vs >> 1U) + alpha_if_u_is_odd;
   }
   return vb;
 }
@@ -844,15 +824,15 @@ uint32_t Hacl_Bignum_Montgomery_bn_check_modulus_u32(uint32_t len, uint32_t *n)
   uint32_t one[len];
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc;
   return m0 & m1;
@@ -867,46 +847,40 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(
 )
 {
   memset(res, 0U, len * sizeof (uint32_t));
-  uint32_t i = nBits / (uint32_t)32U;
-  uint32_t j = nBits % (uint32_t)32U;
-  res[i] = res[i] | (uint32_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U * len - nBits; i0++)
+  uint32_t i = nBits / 32U;
+  uint32_t j = nBits % 32U;
+  res[i] = res[i] | 1U << j;
+  for (uint32_t i0 = 0U; i0 < 64U * len - nBits; i0++)
   {
     Hacl_Bignum_bn_add_mod_n_u32(len, n, res, res, res);
   }
 }
 
-void
-Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
-  uint32_t len,
-  uint32_t *n,
-  uint32_t nInv,
-  uint32_t *c,
-  uint32_t *res
-)
+static void
+bn_mont_reduction_u32(uint32_t len, uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint32_t a_i = n[i];
       uint32_t *res_i = res_j0 + i;
@@ -923,27 +897,27 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint32_t));
-  uint32_t c1 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+  uint32_t c1 = 0U;
+  for (uint32_t i = 0U; i < len / 4U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i);
   }
-  for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+  for (uint32_t i = len / 4U * 4U; i < len; i++)
   {
     uint32_t t1 = res[i];
     uint32_t t2 = n[i];
@@ -952,7 +926,7 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
   }
   uint32_t c10 = c1;
   uint32_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -973,11 +947,11 @@ Hacl_Bignum_Montgomery_bn_to_mont_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv, c, aM);
+  bn_mont_reduction_u32(len, n, nInv, c, aM);
 }
 
 void
@@ -993,7 +967,7 @@ Hacl_Bignum_Montgomery_bn_from_mont_u32(
   uint32_t tmp[len + len];
   memset(tmp, 0U, (len + len) * sizeof (uint32_t));
   memcpy(tmp, aM, len * sizeof (uint32_t));
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, tmp, a);
+  bn_mont_reduction_u32(len, n, nInv_u64, tmp, a);
 }
 
 void
@@ -1009,11 +983,11 @@ Hacl_Bignum_Montgomery_bn_mont_mul_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, aM, bM, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, c, resM);
+  bn_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
 
 void
@@ -1028,11 +1002,11 @@ Hacl_Bignum_Montgomery_bn_mont_sqr_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, aM, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, c, resM);
+  bn_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
 
 uint64_t Hacl_Bignum_Montgomery_bn_check_modulus_u64(uint32_t len, uint64_t *n)
@@ -1041,15 +1015,15 @@ uint64_t Hacl_Bignum_Montgomery_bn_check_modulus_u64(uint32_t len, uint64_t *n)
   uint64_t one[len];
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   return m0 & m1;
@@ -1064,46 +1038,40 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(
 )
 {
   memset(res, 0U, len * sizeof (uint64_t));
-  uint32_t i = nBits / (uint32_t)64U;
-  uint32_t j = nBits % (uint32_t)64U;
-  res[i] = res[i] | (uint64_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U * len - nBits; i0++)
+  uint32_t i = nBits / 64U;
+  uint32_t j = nBits % 64U;
+  res[i] = res[i] | 1ULL << j;
+  for (uint32_t i0 = 0U; i0 < 128U * len - nBits; i0++)
   {
     Hacl_Bignum_bn_add_mod_n_u64(len, n, res, res, res);
   }
 }
 
-void
-Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
-  uint32_t len,
-  uint64_t *n,
-  uint64_t nInv,
-  uint64_t *c,
-  uint64_t *res
-)
+static void
+bn_mont_reduction_u64(uint32_t len, uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c1 = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = n[i];
       uint64_t *res_i = res_j0 + i;
@@ -1120,27 +1088,27 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint64_t));
-  uint64_t c1 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+  uint64_t c1 = 0ULL;
+  for (uint32_t i = 0U; i < len / 4U; i++)
   {
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i);
   }
-  for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+  for (uint32_t i = len / 4U * 4U; i < len; i++)
   {
     uint64_t t1 = res[i];
     uint64_t t2 = n[i];
@@ -1149,7 +1117,7 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
   }
   uint64_t c10 = c1;
   uint64_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -1170,11 +1138,11 @@ Hacl_Bignum_Montgomery_bn_to_mont_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv, c, aM);
+  bn_mont_reduction_u64(len, n, nInv, c, aM);
 }
 
 void
@@ -1190,7 +1158,7 @@ Hacl_Bignum_Montgomery_bn_from_mont_u64(
   uint64_t tmp[len + len];
   memset(tmp, 0U, (len + len) * sizeof (uint64_t));
   memcpy(tmp, aM, len * sizeof (uint64_t));
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, tmp, a);
+  bn_mont_reduction_u64(len, n, nInv_u64, tmp, a);
 }
 
 void
@@ -1206,11 +1174,11 @@ Hacl_Bignum_Montgomery_bn_mont_mul_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, aM, bM, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, c, resM);
+  bn_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
 
 void
@@ -1225,15 +1193,15 @@ Hacl_Bignum_Montgomery_bn_mont_sqr_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, aM, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, c, resM);
+  bn_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
 
-static void
-bn_almost_mont_reduction_u32(
+void
+Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32(
   uint32_t len,
   uint32_t *n,
   uint32_t nInv,
@@ -1241,28 +1209,28 @@ bn_almost_mont_reduction_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint32_t a_i = n[i];
       uint32_t *res_i = res_j0 + i;
@@ -1280,9 +1248,9 @@ bn_almost_mont_reduction_u32(
   uint32_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint32_t));
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len, res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t *os = res;
     uint32_t x = (m & tmp[i]) | (~m & res[i]);
@@ -1303,11 +1271,11 @@ bn_almost_mont_mul_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, aM, bM, tmp, c);
-  bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM);
+  Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
 
 static void
@@ -1322,15 +1290,15 @@ bn_almost_mont_sqr_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, aM, tmp, c);
-  bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM);
+  Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
 
-static void
-bn_almost_mont_reduction_u64(
+void
+Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64(
   uint32_t len,
   uint64_t *n,
   uint64_t nInv,
@@ -1338,28 +1306,28 @@ bn_almost_mont_reduction_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c1 = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = n[i];
       uint64_t *res_i = res_j0 + i;
@@ -1377,9 +1345,9 @@ bn_almost_mont_reduction_u64(
   uint64_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint64_t));
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len, res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t *os = res;
     uint64_t x = (m & tmp[i]) | (~m & res[i]);
@@ -1400,11 +1368,11 @@ bn_almost_mont_mul_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, aM, bM, tmp, c);
-  bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM);
+  Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
 
 static void
@@ -1419,11 +1387,11 @@ bn_almost_mont_sqr_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, aM, tmp, c);
-  bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM);
+  Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
 
 uint32_t
@@ -1439,56 +1407,56 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(
   uint32_t one[len];
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m10 = acc0;
   uint32_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t m1;
-  if (bBits < (uint32_t)32U * bLen)
+  if (bBits < 32U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), bLen);
     uint32_t b2[bLen];
     memset(b2, 0U, bLen * sizeof (uint32_t));
-    uint32_t i0 = bBits / (uint32_t)32U;
-    uint32_t j = bBits % (uint32_t)32U;
-    b2[i0] = b2[i0] | (uint32_t)1U << j;
-    uint32_t acc = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 32U;
+    uint32_t j = bBits % 32U;
+    b2[i0] = b2[i0] | 1U << j;
+    uint32_t acc = 0U;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]);
       uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
     }
     uint32_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint32_t)0xFFFFFFFFU;
+    m1 = 0xFFFFFFFFU;
   }
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t m = m1 & m2;
@@ -1507,19 +1475,12 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t aM[len];
     memset(aM, 0U, len * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-    uint32_t c[len + len];
-    memset(c, 0U, (len + len) * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-    uint32_t tmp0[(uint32_t)4U * len];
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
-    Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c);
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM);
+    Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a, aM);
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t resM[len];
     memset(resM, 0U, len * sizeof (uint32_t));
@@ -1531,13 +1492,13 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         bn_almost_mont_mul_u32(len, ctx_n0, mu, resM, aM, resM);
@@ -1545,44 +1506,33 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
       uint32_t *ctx_n0 = ctx;
       bn_almost_mont_sqr_u32(len, ctx_n0, mu, aM, aM);
     }
-    KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-    uint32_t tmp[len + len];
-    memset(tmp, 0U, (len + len) * sizeof (uint32_t));
-    memcpy(tmp, resM, len * sizeof (uint32_t));
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, tmp, res);
+    Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res);
     return;
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t aM[len];
   memset(aM, 0U, len * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-  uint32_t c[len + len];
-  memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp0[(uint32_t)4U * len];
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM);
+  Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a, aM);
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t resM[len];
   memset(resM, 0U, len * sizeof (uint32_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t ctx[len + len];
   memset(ctx, 0U, (len + len) * sizeof (uint32_t));
   memcpy(ctx, n, len * sizeof (uint32_t));
   memcpy(ctx + len, r2, len * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len);
-  uint32_t table[(uint32_t)16U * len];
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len);
+  uint32_t table[16U * len];
+  memset(table, 0U, 16U * len * sizeof (uint32_t));
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint32_t));
@@ -1593,21 +1543,21 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
   Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * len;
     uint32_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * len;
     uint32_t *ctx_n = ctx;
     bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
     uint32_t bits_l32 = bits_c;
     const uint32_t *a_bits_l = table + bits_l32 * len;
     memcpy(resM, (uint32_t *)a_bits_l, len * sizeof (uint32_t));
@@ -1619,29 +1569,25 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM);
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
-  uint32_t tmp1[len];
-  memset(tmp1, 0U, len * sizeof (uint32_t));
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  uint32_t tmp0[len];
+  memset(tmp0, 0U, len * sizeof (uint32_t));
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
     uint32_t bits_l32 = bits_l;
     const uint32_t *a_bits_l = table + bits_l32 * len;
-    memcpy(tmp1, (uint32_t *)a_bits_l, len * sizeof (uint32_t));
+    memcpy(tmp0, (uint32_t *)a_bits_l, len * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
-    bn_almost_mont_mul_u32(len, ctx_n, mu, resM, tmp1, resM);
+    bn_almost_mont_mul_u32(len, ctx_n, mu, resM, tmp0, resM);
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-  uint32_t tmp2[len + len];
-  memset(tmp2, 0U, (len + len) * sizeof (uint32_t));
-  memcpy(tmp2, resM, len * sizeof (uint32_t));
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, tmp2, res);
+  Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res);
 }
 
 void
@@ -1656,19 +1602,12 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t aM[len];
     memset(aM, 0U, len * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-    uint32_t c[len + len];
-    memset(c, 0U, (len + len) * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-    uint32_t tmp0[(uint32_t)4U * len];
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
-    Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c);
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM);
+    Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a, aM);
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t resM[len];
     memset(resM, 0U, len * sizeof (uint32_t));
@@ -1677,20 +1616,20 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
     memset(ctx, 0U, (len + len) * sizeof (uint32_t));
     memcpy(ctx, n, len * sizeof (uint32_t));
     memcpy(ctx + len, r2, len * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      for (uint32_t i = 0U; i < len; i++)
       {
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -1701,50 +1640,39 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
       sw = bit;
     }
     uint32_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len; i++)
+    for (uint32_t i = 0U; i < len; i++)
     {
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
-    KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-    uint32_t tmp[len + len];
-    memset(tmp, 0U, (len + len) * sizeof (uint32_t));
-    memcpy(tmp, resM, len * sizeof (uint32_t));
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, tmp, res);
+    Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res);
     return;
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t aM[len];
   memset(aM, 0U, len * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-  uint32_t c0[len + len];
-  memset(c0, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp0[(uint32_t)4U * len];
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c0);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c0, aM);
+  Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a, aM);
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t resM[len];
   memset(resM, 0U, len * sizeof (uint32_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t ctx[len + len];
   memset(ctx, 0U, (len + len) * sizeof (uint32_t));
   memcpy(ctx, n, len * sizeof (uint32_t));
   memcpy(ctx + len, r2, len * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len);
-  uint32_t table[(uint32_t)16U * len];
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len);
+  uint32_t table[16U * len];
+  memset(table, 0U, 16U * len * sizeof (uint32_t));
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint32_t));
@@ -1755,29 +1683,29 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
   Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * len;
     uint32_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * len;
     uint32_t *ctx_n = ctx;
     bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint32_t *)(table + (uint32_t)0U * len), len * sizeof (uint32_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+    memcpy(resM, (uint32_t *)(table + 0U * len), len * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
         uint32_t *os = resM;
         uint32_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -1791,39 +1719,35 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM);
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
-  uint32_t tmp1[len];
-  memset(tmp1, 0U, len * sizeof (uint32_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  uint32_t tmp0[len];
+  memset(tmp0, 0U, len * sizeof (uint32_t));
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp1, (uint32_t *)(table + (uint32_t)0U * len), len * sizeof (uint32_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
+    memcpy(tmp0, (uint32_t *)(table + 0U * len), len * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
-        uint32_t *os = tmp1;
-        uint32_t x = (c & res_j[i]) | (~c & tmp1[i]);
+        uint32_t *os = tmp0;
+        uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;
       });
     uint32_t *ctx_n = ctx;
-    bn_almost_mont_mul_u32(len, ctx_n, mu, resM, tmp1, resM);
+    bn_almost_mont_mul_u32(len, ctx_n, mu, resM, tmp0, resM);
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-  uint32_t tmp2[len + len];
-  memset(tmp2, 0U, (len + len) * sizeof (uint32_t));
-  memcpy(tmp2, resM, len * sizeof (uint32_t));
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, tmp2, res);
+  Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res);
 }
 
 void
@@ -1877,56 +1801,56 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(
   uint64_t one[len];
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m10 = acc0;
   uint64_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t m1;
-  if (bBits < (uint32_t)64U * bLen)
+  if (bBits < 64U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
     uint64_t b2[bLen];
     memset(b2, 0U, bLen * sizeof (uint64_t));
-    uint32_t i0 = bBits / (uint32_t)64U;
-    uint32_t j = bBits % (uint32_t)64U;
-    b2[i0] = b2[i0] | (uint64_t)1U << j;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 64U;
+    uint32_t j = bBits % 64U;
+    b2[i0] = b2[i0] | 1ULL << j;
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    m1 = 0xFFFFFFFFFFFFFFFFULL;
   }
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t m = m1 & m2;
@@ -1945,19 +1869,12 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t aM[len];
     memset(aM, 0U, len * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-    uint64_t c[len + len];
-    memset(c, 0U, (len + len) * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-    uint64_t tmp0[(uint32_t)4U * len];
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
-    Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c);
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM);
+    Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a, aM);
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t resM[len];
     memset(resM, 0U, len * sizeof (uint64_t));
@@ -1969,13 +1886,13 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         bn_almost_mont_mul_u64(len, ctx_n0, mu, resM, aM, resM);
@@ -1983,44 +1900,33 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
       uint64_t *ctx_n0 = ctx;
       bn_almost_mont_sqr_u64(len, ctx_n0, mu, aM, aM);
     }
-    KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-    uint64_t tmp[len + len];
-    memset(tmp, 0U, (len + len) * sizeof (uint64_t));
-    memcpy(tmp, resM, len * sizeof (uint64_t));
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, tmp, res);
+    Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res);
     return;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t aM[len];
   memset(aM, 0U, len * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-  uint64_t c[len + len];
-  memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp0[(uint32_t)4U * len];
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM);
+  Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a, aM);
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t resM[len];
   memset(resM, 0U, len * sizeof (uint64_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t ctx[len + len];
   memset(ctx, 0U, (len + len) * sizeof (uint64_t));
   memcpy(ctx, n, len * sizeof (uint64_t));
   memcpy(ctx + len, r2, len * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len);
-  uint64_t table[(uint32_t)16U * len];
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len);
+  uint64_t table[16U * len];
+  memset(table, 0U, 16U * len * sizeof (uint64_t));
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint64_t));
@@ -2031,21 +1937,21 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
   Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * len;
     uint64_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * len;
     uint64_t *ctx_n = ctx;
     bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
     uint32_t bits_l32 = (uint32_t)bits_c;
     const uint64_t *a_bits_l = table + bits_l32 * len;
     memcpy(resM, (uint64_t *)a_bits_l, len * sizeof (uint64_t));
@@ -2057,29 +1963,25 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM);
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
-  uint64_t tmp1[len];
-  memset(tmp1, 0U, len * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  uint64_t tmp0[len];
+  memset(tmp0, 0U, len * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
     uint32_t bits_l32 = (uint32_t)bits_l;
     const uint64_t *a_bits_l = table + bits_l32 * len;
-    memcpy(tmp1, (uint64_t *)a_bits_l, len * sizeof (uint64_t));
+    memcpy(tmp0, (uint64_t *)a_bits_l, len * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
-    bn_almost_mont_mul_u64(len, ctx_n, mu, resM, tmp1, resM);
+    bn_almost_mont_mul_u64(len, ctx_n, mu, resM, tmp0, resM);
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-  uint64_t tmp2[len + len];
-  memset(tmp2, 0U, (len + len) * sizeof (uint64_t));
-  memcpy(tmp2, resM, len * sizeof (uint64_t));
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, tmp2, res);
+  Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res);
 }
 
 void
@@ -2094,19 +1996,12 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t aM[len];
     memset(aM, 0U, len * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-    uint64_t c[len + len];
-    memset(c, 0U, (len + len) * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-    uint64_t tmp0[(uint32_t)4U * len];
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
-    Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c);
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM);
+    Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a, aM);
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t resM[len];
     memset(resM, 0U, len * sizeof (uint64_t));
@@ -2115,20 +2010,20 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
     memset(ctx, 0U, (len + len) * sizeof (uint64_t));
     memcpy(ctx, n, len * sizeof (uint64_t));
     memcpy(ctx + len, r2, len * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      for (uint32_t i = 0U; i < len; i++)
       {
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -2139,50 +2034,39 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
       sw = bit;
     }
     uint64_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len; i++)
+    for (uint32_t i = 0U; i < len; i++)
     {
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
-    KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-    uint64_t tmp[len + len];
-    memset(tmp, 0U, (len + len) * sizeof (uint64_t));
-    memcpy(tmp, resM, len * sizeof (uint64_t));
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, tmp, res);
+    Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res);
     return;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t aM[len];
   memset(aM, 0U, len * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-  uint64_t c0[len + len];
-  memset(c0, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp0[(uint32_t)4U * len];
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c0);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c0, aM);
+  Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a, aM);
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t resM[len];
   memset(resM, 0U, len * sizeof (uint64_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t ctx[len + len];
   memset(ctx, 0U, (len + len) * sizeof (uint64_t));
   memcpy(ctx, n, len * sizeof (uint64_t));
   memcpy(ctx + len, r2, len * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len);
-  uint64_t table[(uint32_t)16U * len];
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len);
+  uint64_t table[16U * len];
+  memset(table, 0U, 16U * len * sizeof (uint64_t));
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint64_t));
@@ -2193,29 +2077,29 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
   Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * len;
     uint64_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * len;
     uint64_t *ctx_n = ctx;
     bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint64_t *)(table + (uint32_t)0U * len), len * sizeof (uint64_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+    memcpy(resM, (uint64_t *)(table + 0U * len), len * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
         uint64_t *os = resM;
         uint64_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -2229,39 +2113,35 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM);
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
-  uint64_t tmp1[len];
-  memset(tmp1, 0U, len * sizeof (uint64_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  uint64_t tmp0[len];
+  memset(tmp0, 0U, len * sizeof (uint64_t));
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp1, (uint64_t *)(table + (uint32_t)0U * len), len * sizeof (uint64_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
+    memcpy(tmp0, (uint64_t *)(table + 0U * len), len * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
-        uint64_t *os = tmp1;
-        uint64_t x = (c & res_j[i]) | (~c & tmp1[i]);
+        uint64_t *os = tmp0;
+        uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;
       });
     uint64_t *ctx_n = ctx;
-    bn_almost_mont_mul_u64(len, ctx_n, mu, resM, tmp1, resM);
+    bn_almost_mont_mul_u64(len, ctx_n, mu, resM, tmp0, resM);
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-  uint64_t tmp2[len + len];
-  memset(tmp2, 0U, (len + len) * sizeof (uint64_t));
-  memcpy(tmp2, resM, len * sizeof (uint64_t));
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, tmp2, res);
+  Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res);
 }
 
 void
diff --git a/src/Hacl_Bignum256.c b/src/Hacl_Bignum256.c
index 41aaadeb..54bbc88a 100644
--- a/src/Hacl_Bignum256.c
+++ b/src/Hacl_Bignum256.c
@@ -60,23 +60,23 @@ Write `a + b mod 2^256` in `res`.
 */
 uint64_t Hacl_Bignum256_add(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   return c;
@@ -91,23 +91,23 @@ Write `a - b mod 2^256` in `res`.
 */
 uint64_t Hacl_Bignum256_sub(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   return c;
@@ -125,52 +125,52 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum256_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -188,53 +188,53 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum256_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x;);
@@ -248,30 +248,30 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum256_mul(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = b[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = a[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = a[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
     uint64_t r = c;
-    res[(uint32_t)4U + i0] = r;);
+    res[4U + i0] = r;);
 }
 
 /**
@@ -282,31 +282,31 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = a;
     uint64_t a_j = a[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -314,29 +314,29 @@ void Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res)
     }
     uint64_t r = c;
     res[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
-  uint32_t i = nBits / (uint32_t)64U;
-  uint32_t j = nBits % (uint32_t)64U;
-  res[i] = res[i] | (uint64_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)512U - nBits; i0++)
+  memset(res, 0U, 4U * sizeof (uint64_t));
+  uint32_t i = nBits / 64U;
+  uint32_t j = nBits % 64U;
+  res[i] = res[i] | 1ULL << j;
+  for (uint32_t i0 = 0U; i0 < 512U - nBits; i0++)
   {
     Hacl_Bignum256_add_mod(n, res, res, res);
   }
@@ -344,112 +344,119 @@ static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 
 static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)4U + i0;
-    uint64_t res_j = c[(uint32_t)4U + i0];
+    uint64_t *resb = c + 4U + i0;
+    uint64_t res_j = c[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, c + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c1 = (uint64_t)0U;
+  uint64_t c1 = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i);
   }
   uint64_t c10 = c1;
   uint64_t c2 = c00 - c10;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
 }
 
+static inline void to(uint64_t *n, uint64_t nInv, uint64_t *r2, uint64_t *a, uint64_t *aM)
+{
+  uint64_t c[8U] = { 0U };
+  Hacl_Bignum256_mul(a, r2, c);
+  reduction(n, nInv, c, aM);
+}
+
 static inline void from(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *a)
 {
   uint64_t tmp[8U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, aM, 4U * sizeof (uint64_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)4U + i0;
-    uint64_t res_j = c[(uint32_t)4U + i0];
+    uint64_t *resb = c + 4U + i0;
+    uint64_t res_j = c[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, c + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
   uint64_t c1 = Hacl_Bignum256_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (m & tmp[i]) | (~m & res[i]);
     os[i] = x;);
@@ -459,82 +466,14 @@ static inline void
 amont_mul(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *bM, uint64_t *resM)
 {
   uint64_t c[8U] = { 0U };
-  memset(c, 0U, (uint32_t)8U * sizeof (uint64_t));
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t bj = bM[i0];
-    uint64_t *res_j = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    {
-      uint64_t a_i = aM[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c1, res_i0);
-      uint64_t a_i0 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c1, res_i1);
-      uint64_t a_i1 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c1, res_i2);
-      uint64_t a_i2 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c1, res_i);
-    }
-    uint64_t r = c1;
-    c[(uint32_t)4U + i0] = r;);
+  Hacl_Bignum256_mul(aM, bM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
 static inline void amont_sqr(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *resM)
 {
   uint64_t c[8U] = { 0U };
-  memset(c, 0U, (uint32_t)8U * sizeof (uint64_t));
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *ab = aM;
-    uint64_t a_j = aM[i0];
-    uint64_t *res_j = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
-    {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c1, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c1, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c1, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c1, res_i);
-    }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
-    {
-      uint64_t a_i = ab[i];
-      uint64_t *res_i = res_j + i;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c1, res_i);
-    }
-    uint64_t r = c1;
-    c[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, c, c, c);
-  KRML_HOST_IGNORE(c0);
-  uint64_t tmp[8U] = { 0U };
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    FStar_UInt128_uint128 res = FStar_UInt128_mul_wide(aM[i], aM[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
-    uint64_t lo = FStar_UInt128_uint128_to_uint64(res);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, c, tmp, c);
-  KRML_HOST_IGNORE(c1);
+  Hacl_Bignum256_sqr(aM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -543,50 +482,9 @@ bn_slow_precomp(uint64_t *n, uint64_t mu, uint64_t *r2, uint64_t *a, uint64_t *r
 {
   uint64_t a_mod[4U] = { 0U };
   uint64_t a1[8U] = { 0U };
-  memcpy(a1, a, (uint32_t)8U * sizeof (uint64_t));
-  uint64_t c0 = (uint64_t)0U;
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t qj = mu * a1[i0];
-    uint64_t *res_j0 = a1 + i0;
-    uint64_t c = (uint64_t)0U;
-    {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
-    }
-    uint64_t r = c;
-    uint64_t c1 = r;
-    uint64_t *resb = a1 + (uint32_t)4U + i0;
-    uint64_t res_j = a1[(uint32_t)4U + i0];
-    c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););
-  memcpy(a_mod, a1 + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
-  uint64_t c00 = c0;
-  uint64_t tmp[4U] = { 0U };
-  uint64_t c1 = Hacl_Bignum256_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = a_mod;
-    uint64_t x = (m & tmp[i]) | (~m & a_mod[i]);
-    os[i] = x;);
-  uint64_t c[8U] = { 0U };
-  Hacl_Bignum256_mul(a_mod, r2, c);
-  reduction(n, mu, c, res);
+  memcpy(a1, a, 8U * sizeof (uint64_t));
+  areduction(n, mu, a1, a_mod);
+  to(n, mu, r2, a_mod, res);
 }
 
 /**
@@ -603,23 +501,22 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum256_mod(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[4U] = { 0U };
-  memset(one, 0U, (uint32_t)4U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
+  memset(one, 0U, 4U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m1 = acc;
   uint64_t is_valid_m = m0 & m1;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t r2[4U] = { 0U };
     precompr2(nBits, n, r2);
@@ -628,68 +525,68 @@ bool Hacl_Bignum256_mod(uint64_t *n, uint64_t *a, uint64_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b)
 {
   uint64_t one[4U] = { 0U };
-  memset(one, 0U, (uint32_t)4U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
+  memset(one, 0U, 4U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m10 = acc0;
   uint64_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t m1;
-  if (bBits < (uint32_t)64U * bLen)
+  if (bBits < 64U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
     uint64_t b2[bLen];
     memset(b2, 0U, bLen * sizeof (uint64_t));
-    uint32_t i0 = bBits / (uint32_t)64U;
-    uint32_t j = bBits % (uint32_t)64U;
-    b2[i0] = b2[i0] | (uint64_t)1U << j;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 64U;
+    uint32_t j = bBits % 64U;
+    b2[i0] = b2[i0] | 1ULL << j;
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    m1 = 0xFFFFFFFFFFFFFFFFULL;
   }
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m2 = acc;
   uint64_t m = m1 & m2;
   return m00 & m;
@@ -706,26 +603,24 @@ exp_vartime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[4U] = { 0U };
-    uint64_t c[8U] = { 0U };
-    Hacl_Bignum256_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint64_t resM[4U] = { 0U };
     uint64_t ctx[8U] = { 0U };
-    memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
+    memcpy(ctx, n, 4U * sizeof (uint64_t));
+    memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -733,86 +628,76 @@ exp_vartime_precomp(
       uint64_t *ctx_n0 = ctx;
       amont_sqr(ctx_n0, mu, aM, aM);
     }
-    uint64_t tmp[8U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)4U * sizeof (uint64_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint64_t aM[4U] = { 0U };
-  uint64_t c[8U] = { 0U };
-  Hacl_Bignum256_mul(a, r2, c);
-  reduction(n, mu, c, aM);
+  to(n, mu, r2, a, aM);
   uint64_t resM[4U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[8U] = { 0U };
-  memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(ctx, n, 4U * sizeof (uint64_t));
+  memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
   uint64_t table[64U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)4U;
+  uint64_t *t1 = table + 4U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)4U;
+  uint64_t *ctx_r20 = ctx + 4U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(t1, aM, 4U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)4U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 4U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U;
+    memcpy(table + (2U * i + 2U) * 4U, tmp, 4U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 4U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 4U, tmp, 4U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
     uint32_t bits_l32 = (uint32_t)bits_c;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)4U;
-    memcpy(resM, (uint64_t *)a_bits_l, (uint32_t)4U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 4U;
+    memcpy(resM, (uint64_t *)a_bits_l, 4U * sizeof (uint64_t));
   }
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[4U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
     uint32_t bits_l32 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)4U;
-    memcpy(tmp0, (uint64_t *)a_bits_l, (uint32_t)4U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 4U;
+    memcpy(tmp0, (uint64_t *)a_bits_l, 4U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint64_t tmp1[8U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)4U * sizeof (uint64_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -826,32 +711,30 @@ exp_consttime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[4U] = { 0U };
-    uint64_t c[8U] = { 0U };
-    Hacl_Bignum256_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint64_t resM[4U] = { 0U };
     uint64_t ctx[8U] = { 0U };
-    memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    memcpy(ctx, n, 4U * sizeof (uint64_t));
+    memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]);
+        0U,
+        4U,
+        1U,
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;);
       uint64_t *ctx_n0 = ctx;
@@ -862,73 +745,65 @@ exp_consttime_precomp(
     }
     uint64_t sw0 = sw;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]);
+      0U,
+      4U,
+      1U,
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;);
-    uint64_t tmp[8U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)4U * sizeof (uint64_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint64_t aM[4U] = { 0U };
-  uint64_t c0[8U] = { 0U };
-  Hacl_Bignum256_mul(a, r2, c0);
-  reduction(n, mu, c0, aM);
+  to(n, mu, r2, a, aM);
   uint64_t resM[4U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[8U] = { 0U };
-  memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(ctx, n, 4U * sizeof (uint64_t));
+  memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
   uint64_t table[64U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)4U;
+  uint64_t *t1 = table + 4U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)4U;
+  uint64_t *ctx_r20 = ctx + 4U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(t1, aM, 4U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)4U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 4U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U;
+    memcpy(table + (2U * i + 2U) * 4U, tmp, 4U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 4U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 4U, tmp, 4U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint64_t *)table, (uint32_t)4U * sizeof (uint64_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+    memcpy(resM, (uint64_t *)table, 4U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)4U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 4U;
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *os = resM;
         uint64_t x = (c & res_j[i]) | (~c & resM[i]);
         os[i] = x;););
@@ -936,40 +811,38 @@ exp_consttime_precomp(
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[4U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)4U * sizeof (uint64_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 4U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)4U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 4U;
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint64_t tmp1[8U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)4U * sizeof (uint64_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -1034,17 +907,16 @@ Hacl_Bignum256_mod_exp_vartime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -1077,17 +949,16 @@ Hacl_Bignum256_mod_exp_consttime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -1108,67 +979,66 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum256_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[4U] = { 0U };
-  memset(one, 0U, (uint32_t)4U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
+  memset(one, 0U, 4U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m1 = acc0;
   uint64_t m00 = m0 & m1;
   uint64_t bn_zero[4U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
   uint64_t res10 = mask1;
   uint64_t m10 = res10;
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m2 = acc;
   uint64_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t n2[4U] = { 0U };
-    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2);
-    uint64_t *a1 = n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2);
+    uint64_t *a1 = n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
     KRML_MAYBE_FOR3(i,
-      (uint32_t)0U,
-      (uint32_t)3U,
-      (uint32_t)1U,
+      0U,
+      3U,
+      1U,
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
     uint64_t c1 = c;
     uint64_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)256U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 256U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -1192,17 +1062,15 @@ Heap-allocate and initialize a montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum256_mont_ctx_init(uint64_t *n)
 {
-  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint64_t));
-  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint64_t));
+  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC(4U, sizeof (uint64_t));
+  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC(4U, sizeof (uint64_t));
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)4U * sizeof (uint64_t));
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
+  memcpy(n11, n, 4U * sizeof (uint64_t));
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
   precompr2(nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
-  res = { .len = (uint32_t)4U, .n = n11, .mu = mu, .r2 = r21 };
+  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = 4U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *)KRML_HOST_MALLOC(sizeof (
@@ -1330,21 +1198,21 @@ Hacl_Bignum256_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k;
   uint64_t n2[4U] = { 0U };
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
-  uint64_t *a1 = k1.n + (uint32_t)1U;
-  uint64_t *res1 = n2 + (uint32_t)1U;
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
+  uint64_t *a1 = k1.n + 1U;
+  uint64_t *res1 = n2 + 1U;
   uint64_t c = c0;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
+    0U,
+    3U,
+    1U,
     uint64_t t1 = a1[i];
     uint64_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
   uint64_t c1 = c;
   uint64_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)256U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 256U, n2, res);
 }
 
 
@@ -1366,36 +1234,28 @@ Load a big-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum256_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res2;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -1415,36 +1275,28 @@ Load a little-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum256_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++)
   {
     uint64_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)8U;
+    uint8_t *bj = tmp + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r1 = u;
     uint64_t x = r1;
@@ -1462,12 +1314,8 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum256_bn_to_bytes_be(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(res + i * (uint32_t)8U, b[(uint32_t)4U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(res + i * 8U, b[4U - i - 1U]););
 }
 
 /**
@@ -1479,12 +1327,8 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum256_bn_to_bytes_le(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(res + i * (uint32_t)8U, b[i]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(res + i * 8U, b[i]););
 }
 
 
@@ -1500,14 +1344,14 @@ Returns 2^64 - 1 if a < b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum256_lt_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   return acc;
 }
 
@@ -1518,11 +1362,11 @@ Returns 2^64 - 1 if a = b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum256_eq_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
diff --git a/src/Hacl_Bignum256_32.c b/src/Hacl_Bignum256_32.c
index ada15309..eed6c65c 100644
--- a/src/Hacl_Bignum256_32.c
+++ b/src/Hacl_Bignum256_32.c
@@ -60,26 +60,26 @@ Write `a + b mod 2^256` in `res`.
 */
 uint32_t Hacl_Bignum256_32_add(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i););
   return c;
 }
@@ -93,26 +93,26 @@ Write `a - b mod 2^256` in `res`.
 */
 uint32_t Hacl_Bignum256_32_sub(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i););
   return c;
 }
@@ -129,56 +129,56 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum256_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i););
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i););
   uint32_t c1 = c;
   uint32_t c2 = c00 - c1;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -196,57 +196,57 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum256_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i););
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i););
   uint32_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint32_t c2 = (uint32_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t c2 = 0U - c00;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x;);
@@ -260,32 +260,32 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum256_32_mul(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)16U * sizeof (uint32_t));
+  memset(res, 0U, 16U * sizeof (uint32_t));
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t bj = b[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
+    uint32_t c = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = a[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = a[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i0);
-      uint32_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = a[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c, res_i1);
-      uint32_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = a[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c, res_i2);
-      uint32_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = a[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c, res_i););
     uint32_t r = c;
-    res[(uint32_t)8U + i0] = r;);
+    res[8U + i0] = r;);
 }
 
 /**
@@ -296,31 +296,31 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)16U * sizeof (uint32_t));
+  memset(res, 0U, 16U * sizeof (uint32_t));
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *ab = a;
     uint32_t a_j = a[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint32_t a_i = ab[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = ab[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i0);
-      uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = ab[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c, res_i1);
-      uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = ab[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c, res_i2);
-      uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = ab[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint32_t a_i = ab[i];
       uint32_t *res_i = res_j + i;
@@ -328,29 +328,29 @@ void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res)
     }
     uint32_t r = c;
     res[i0 + i0] = r;);
-  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint32_t tmp[16U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t res1 = (uint64_t)a[i] * (uint64_t)a[i];
-    uint32_t hi = (uint32_t)(res1 >> (uint32_t)32U);
+    uint32_t hi = (uint32_t)(res1 >> 32U);
     uint32_t lo = (uint32_t)res1;
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
-  uint32_t i = nBits / (uint32_t)32U;
-  uint32_t j = nBits % (uint32_t)32U;
-  res[i] = res[i] | (uint32_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)512U - nBits; i0++)
+  memset(res, 0U, 8U * sizeof (uint32_t));
+  uint32_t i = nBits / 32U;
+  uint32_t j = nBits % 32U;
+  res[i] = res[i] | 1U << j;
+  for (uint32_t i0 = 0U; i0 < 512U - nBits; i0++)
   {
     Hacl_Bignum256_32_add_mod(n, res, res, res);
   }
@@ -358,118 +358,125 @@ static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 
 static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
+    uint32_t c1 = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i););
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)8U + i0;
-    uint32_t res_j = c[(uint32_t)8U + i0];
+    uint32_t *resb = c + 8U + i0;
+    uint32_t res_j = c[8U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(res, c + 8U, 8U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
-  uint32_t c1 = (uint32_t)0U;
+  uint32_t c1 = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i););
   uint32_t c10 = c1;
   uint32_t c2 = c00 - c10;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
 }
 
+static inline void to(uint32_t *n, uint32_t nInv, uint32_t *r2, uint32_t *a, uint32_t *aM)
+{
+  uint32_t c[16U] = { 0U };
+  Hacl_Bignum256_32_mul(a, r2, c);
+  reduction(n, nInv, c, aM);
+}
+
 static inline void from(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *a)
 {
   uint32_t tmp[16U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp, aM, 8U * sizeof (uint32_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
+    uint32_t c1 = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i););
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)8U + i0;
-    uint32_t res_j = c[(uint32_t)8U + i0];
+    uint32_t *resb = c + 8U + i0;
+    uint32_t res_j = c[8U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(res, c + 8U, 8U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
   uint32_t c1 = Hacl_Bignum256_32_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (m & tmp[i]) | (~m & res[i]);
     os[i] = x;);
@@ -479,84 +486,14 @@ static inline void
 amont_mul(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *bM, uint32_t *resM)
 {
   uint32_t c[16U] = { 0U };
-  memset(c, 0U, (uint32_t)16U * sizeof (uint32_t));
-  KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t bj = bM[i0];
-    uint32_t *res_j = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = aM[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c1, res_i0);
-      uint32_t a_i0 = aM[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c1, res_i1);
-      uint32_t a_i1 = aM[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c1, res_i2);
-      uint32_t a_i2 = aM[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c1, res_i););
-    uint32_t r = c1;
-    c[(uint32_t)8U + i0] = r;);
+  Hacl_Bignum256_32_mul(aM, bM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
 static inline void amont_sqr(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *resM)
 {
   uint32_t c[16U] = { 0U };
-  memset(c, 0U, (uint32_t)16U * sizeof (uint32_t));
-  KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *ab = aM;
-    uint32_t a_j = aM[i0];
-    uint32_t *res_j = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
-    {
-      uint32_t a_i = ab[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c1, res_i0);
-      uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c1, res_i1);
-      uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c1, res_i2);
-      uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c1, res_i);
-    }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
-    {
-      uint32_t a_i = ab[i];
-      uint32_t *res_i = res_j + i;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c1, res_i);
-    }
-    uint32_t r = c1;
-    c[i0 + i0] = r;);
-  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, c, c, c);
-  KRML_HOST_IGNORE(c0);
-  uint32_t tmp[16U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint64_t res = (uint64_t)aM[i] * (uint64_t)aM[i];
-    uint32_t hi = (uint32_t)(res >> (uint32_t)32U);
-    uint32_t lo = (uint32_t)res;
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, c, tmp, c);
-  KRML_HOST_IGNORE(c1);
+  Hacl_Bignum256_32_sqr(aM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -565,52 +502,9 @@ bn_slow_precomp(uint32_t *n, uint32_t mu, uint32_t *r2, uint32_t *a, uint32_t *r
 {
   uint32_t a_mod[8U] = { 0U };
   uint32_t a1[16U] = { 0U };
-  memcpy(a1, a, (uint32_t)16U * sizeof (uint32_t));
-  uint32_t c0 = (uint32_t)0U;
-  KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t qj = mu * a1[i0];
-    uint32_t *res_j0 = a1 + i0;
-    uint32_t c = (uint32_t)0U;
-    KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i););
-    uint32_t r = c;
-    uint32_t c1 = r;
-    uint32_t *resb = a1 + (uint32_t)8U + i0;
-    uint32_t res_j = a1[(uint32_t)8U + i0];
-    c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c1, res_j, resb););
-  memcpy(a_mod, a1 + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t));
-  uint32_t c00 = c0;
-  uint32_t tmp[8U] = { 0U };
-  uint32_t c1 = Hacl_Bignum256_32_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = a_mod;
-    uint32_t x = (m & tmp[i]) | (~m & a_mod[i]);
-    os[i] = x;);
-  uint32_t c[16U] = { 0U };
-  Hacl_Bignum256_32_mul(a_mod, r2, c);
-  reduction(n, mu, c, res);
+  memcpy(a1, a, 16U * sizeof (uint32_t));
+  areduction(n, mu, a1, a_mod);
+  to(n, mu, r2, a_mod, res);
 }
 
 /**
@@ -627,22 +521,22 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum256_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[8U] = { 0U };
-  memset(one, 0U, (uint32_t)8U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
+  memset(one, 0U, 8U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m1 = acc;
   uint32_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t r2[8U] = { 0U };
     precompr2(nBits, n, r2);
@@ -651,68 +545,68 @@ bool Hacl_Bignum256_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b)
 {
   uint32_t one[8U] = { 0U };
-  memset(one, 0U, (uint32_t)8U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
+  memset(one, 0U, 8U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m10 = acc0;
   uint32_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t m1;
-  if (bBits < (uint32_t)32U * bLen)
+  if (bBits < 32U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), bLen);
     uint32_t b2[bLen];
     memset(b2, 0U, bLen * sizeof (uint32_t));
-    uint32_t i0 = bBits / (uint32_t)32U;
-    uint32_t j = bBits % (uint32_t)32U;
-    b2[i0] = b2[i0] | (uint32_t)1U << j;
-    uint32_t acc = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 32U;
+    uint32_t j = bBits % 32U;
+    b2[i0] = b2[i0] | 1U << j;
+    uint32_t acc = 0U;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]);
       uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
     }
     uint32_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint32_t)0xFFFFFFFFU;
+    m1 = 0xFFFFFFFFU;
   }
-  uint32_t acc = (uint32_t)0U;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m2 = acc;
   uint32_t m = m1 & m2;
   return m00 & m;
@@ -729,26 +623,24 @@ exp_vartime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[8U] = { 0U };
-    uint32_t c[16U] = { 0U };
-    Hacl_Bignum256_32_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint32_t resM[8U] = { 0U };
     uint32_t ctx[16U] = { 0U };
-    memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(ctx, n, 8U * sizeof (uint32_t));
+    memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -756,86 +648,76 @@ exp_vartime_precomp(
       uint32_t *ctx_n0 = ctx;
       amont_sqr(ctx_n0, mu, aM, aM);
     }
-    uint32_t tmp[16U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)8U * sizeof (uint32_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint32_t aM[8U] = { 0U };
-  uint32_t c[16U] = { 0U };
-  Hacl_Bignum256_32_mul(a, r2, c);
-  reduction(n, mu, c, aM);
+  to(n, mu, r2, a, aM);
   uint32_t resM[8U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[16U] = { 0U };
-  memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(ctx, n, 8U * sizeof (uint32_t));
+  memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
   uint32_t table[128U] = { 0U };
   uint32_t tmp[8U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)8U;
+  uint32_t *t1 = table + 8U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)8U;
+  uint32_t *ctx_r20 = ctx + 8U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(t1, aM, 8U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)8U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 8U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U;
+    memcpy(table + (2U * i + 2U) * 8U, tmp, 8U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 8U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 8U, tmp, 8U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
     uint32_t bits_l32 = bits_c;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)8U;
-    memcpy(resM, (uint32_t *)a_bits_l, (uint32_t)8U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 8U;
+    memcpy(resM, (uint32_t *)a_bits_l, 8U * sizeof (uint32_t));
   }
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[8U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
     uint32_t bits_l32 = bits_l;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)8U;
-    memcpy(tmp0, (uint32_t *)a_bits_l, (uint32_t)8U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 8U;
+    memcpy(tmp0, (uint32_t *)a_bits_l, 8U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint32_t tmp1[16U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)8U * sizeof (uint32_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -849,32 +731,30 @@ exp_consttime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[8U] = { 0U };
-    uint32_t c[16U] = { 0U };
-    Hacl_Bignum256_32_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint32_t resM[8U] = { 0U };
     uint32_t ctx[16U] = { 0U };
-    memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    memcpy(ctx, n, 8U * sizeof (uint32_t));
+    memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
       KRML_MAYBE_FOR8(i,
-        (uint32_t)0U,
-        (uint32_t)8U,
-        (uint32_t)1U,
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]);
+        0U,
+        8U,
+        1U,
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;);
       uint32_t *ctx_n0 = ctx;
@@ -885,73 +765,65 @@ exp_consttime_precomp(
     }
     uint32_t sw0 = sw;
     KRML_MAYBE_FOR8(i,
-      (uint32_t)0U,
-      (uint32_t)8U,
-      (uint32_t)1U,
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]);
+      0U,
+      8U,
+      1U,
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;);
-    uint32_t tmp[16U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)8U * sizeof (uint32_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint32_t aM[8U] = { 0U };
-  uint32_t c0[16U] = { 0U };
-  Hacl_Bignum256_32_mul(a, r2, c0);
-  reduction(n, mu, c0, aM);
+  to(n, mu, r2, a, aM);
   uint32_t resM[8U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[16U] = { 0U };
-  memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(ctx, n, 8U * sizeof (uint32_t));
+  memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
   uint32_t table[128U] = { 0U };
   uint32_t tmp[8U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)8U;
+  uint32_t *t1 = table + 8U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)8U;
+  uint32_t *ctx_r20 = ctx + 8U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(t1, aM, 8U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)8U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 8U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U;
+    memcpy(table + (2U * i + 2U) * 8U, tmp, 8U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 8U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 8U, tmp, 8U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint32_t *)table, (uint32_t)8U * sizeof (uint32_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+    memcpy(resM, (uint32_t *)table, 8U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)8U;
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 8U;
       KRML_MAYBE_FOR8(i,
-        (uint32_t)0U,
-        (uint32_t)8U,
-        (uint32_t)1U,
+        0U,
+        8U,
+        1U,
         uint32_t *os = resM;
         uint32_t x = (c & res_j[i]) | (~c & resM[i]);
         os[i] = x;););
@@ -959,40 +831,38 @@ exp_consttime_precomp(
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[8U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint32_t *)table, (uint32_t)8U * sizeof (uint32_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
+    memcpy(tmp0, (uint32_t *)table, 8U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)8U;
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 8U;
       KRML_MAYBE_FOR8(i,
-        (uint32_t)0U,
-        (uint32_t)8U,
-        (uint32_t)1U,
+        0U,
+        8U,
+        1U,
         uint32_t *os = tmp0;
         uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint32_t tmp1[16U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)8U * sizeof (uint32_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -1057,16 +927,16 @@ Hacl_Bignum256_32_mod_exp_vartime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -1099,16 +969,16 @@ Hacl_Bignum256_32_mod_exp_consttime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -1129,80 +999,80 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum256_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[8U] = { 0U };
-  memset(one, 0U, (uint32_t)8U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
+  memset(one, 0U, 8U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m1 = acc0;
   uint32_t m00 = m0 & m1;
   uint32_t bn_zero[8U] = { 0U };
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
+  uint32_t mask = 0xFFFFFFFFU;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint32_t mask1 = mask;
   uint32_t res10 = mask1;
   uint32_t m10 = res10;
-  uint32_t acc = (uint32_t)0U;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m2 = acc;
   uint32_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t n2[8U] = { 0U };
-    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2);
-    uint32_t *a1 = n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2);
+    uint32_t *a1 = n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
     {
-      uint32_t t1 = a1[(uint32_t)4U * (uint32_t)0U];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * (uint32_t)0U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * 0U];
+      uint32_t *res_i0 = res1 + 4U * 0U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * 0U + 1U];
+      uint32_t *res_i1 = res1 + 4U * 0U + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * 0U + 2U];
+      uint32_t *res_i2 = res1 + 4U * 0U + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * 0U + 3U];
+      uint32_t *res_i = res1 + 4U * 0U + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
     KRML_MAYBE_FOR3(i,
-      (uint32_t)4U,
-      (uint32_t)7U,
-      (uint32_t)1U,
+      4U,
+      7U,
+      1U,
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
     uint32_t c1 = c;
     uint32_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)256U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 256U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 
@@ -1226,16 +1096,15 @@ Heap-allocate and initialize a montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum256_32_mont_ctx_init(uint32_t *n)
 {
-  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
-  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
+  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)8U * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
+  memcpy(n11, n, 8U * sizeof (uint32_t));
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
   precompr2(nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
-  res = { .len = (uint32_t)8U, .n = n11, .mu = mu, .r2 = r21 };
+  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res = { .len = 8U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *)KRML_HOST_MALLOC(sizeof (
@@ -1363,35 +1232,35 @@ Hacl_Bignum256_32_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k;
   uint32_t n2[8U] = { 0U };
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
-  uint32_t *a1 = k1.n + (uint32_t)1U;
-  uint32_t *res1 = n2 + (uint32_t)1U;
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
+  uint32_t *a1 = k1.n + 1U;
+  uint32_t *res1 = n2 + 1U;
   uint32_t c = c0;
   {
-    uint32_t t1 = a1[(uint32_t)4U * (uint32_t)0U];
-    uint32_t *res_i0 = res1 + (uint32_t)4U * (uint32_t)0U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-    uint32_t t10 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint32_t *res_i1 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-    uint32_t t11 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint32_t *res_i2 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-    uint32_t t12 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint32_t *res_i = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+    uint32_t t1 = a1[4U * 0U];
+    uint32_t *res_i0 = res1 + 4U * 0U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+    uint32_t t10 = a1[4U * 0U + 1U];
+    uint32_t *res_i1 = res1 + 4U * 0U + 1U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+    uint32_t t11 = a1[4U * 0U + 2U];
+    uint32_t *res_i2 = res1 + 4U * 0U + 2U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+    uint32_t t12 = a1[4U * 0U + 3U];
+    uint32_t *res_i = res1 + 4U * 0U + 3U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
   }
   KRML_MAYBE_FOR3(i,
-    (uint32_t)4U,
-    (uint32_t)7U,
-    (uint32_t)1U,
+    4U,
+    7U,
+    1U,
     uint32_t t1 = a1[i];
     uint32_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
   uint32_t c1 = c;
   uint32_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)256U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 256U, n2, res);
 }
 
 
@@ -1413,36 +1282,28 @@ Load a big-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint32_t *os = res2;
-    uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U);
+    uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U);
     uint32_t x = u;
     os[i] = x;
   }
@@ -1462,36 +1323,28 @@ Load a little-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++)
   {
     uint32_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)4U;
+    uint8_t *bj = tmp + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r1 = u;
     uint32_t x = r1;
@@ -1509,12 +1362,8 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum256_32_bn_to_bytes_be(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_be(res + i * (uint32_t)4U, b[(uint32_t)8U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(res + i * 4U, b[8U - i - 1U]););
 }
 
 /**
@@ -1526,12 +1375,8 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum256_32_bn_to_bytes_le(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_le(res + i * (uint32_t)4U, b[i]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_le(res + i * 4U, b[i]););
 }
 
 
@@ -1547,14 +1392,14 @@ Returns 2^32 - 1 if a < b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum256_32_lt_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t acc = (uint32_t)0U;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   return acc;
 }
 
@@ -1565,11 +1410,11 @@ Returns 2^32 - 1 if a = b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum256_32_eq_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
+  uint32_t mask = 0xFFFFFFFFU;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;);
   uint32_t mask1 = mask;
diff --git a/src/Hacl_Bignum32.c b/src/Hacl_Bignum32.c
index a9bb4986..34b46324 100644
--- a/src/Hacl_Bignum32.c
+++ b/src/Hacl_Bignum32.c
@@ -105,9 +105,9 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum32_mul(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, b, tmp, res);
 }
 
@@ -119,9 +119,9 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum32_sqr(uint32_t len, uint32_t *a, uint32_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, a, tmp, res);
 }
 
@@ -142,61 +142,8 @@ bn_slow_precomp(
   uint32_t a1[len + len];
   memset(a1, 0U, (len + len) * sizeof (uint32_t));
   memcpy(a1, a, (len + len) * sizeof (uint32_t));
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
-  {
-    uint32_t qj = mu * a1[i0];
-    uint32_t *res_j0 = a1 + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
-    {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i);
-    }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
-    {
-      uint32_t a_i = n[i];
-      uint32_t *res_i = res_j0 + i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i);
-    }
-    uint32_t r = c;
-    uint32_t c1 = r;
-    uint32_t *resb = a1 + len + i0;
-    uint32_t res_j = a1[len + i0];
-    c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c1, res_j, resb);
-  }
-  memcpy(a_mod, a1 + len, (len + len - len) * sizeof (uint32_t));
-  uint32_t c00 = c0;
-  KRML_CHECK_SIZE(sizeof (uint32_t), len);
-  uint32_t tmp0[len];
-  memset(tmp0, 0U, len * sizeof (uint32_t));
-  uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len, a_mod, n, tmp0);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
-  {
-    uint32_t *os = a_mod;
-    uint32_t x = (m & tmp0[i]) | (~m & a_mod[i]);
-    os[i] = x;
-  }
-  KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-  uint32_t c[len + len];
-  memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a_mod, r2, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, res);
+  Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32(len, n, mu, a1, a_mod);
+  Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a_mod, res);
 }
 
 /**
@@ -216,20 +163,20 @@ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res)
   uint32_t one[len];
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc;
   uint32_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t r2[len];
@@ -242,7 +189,7 @@ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res)
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -276,8 +223,8 @@ Hacl_Bignum32_mod_exp_vartime(
 )
 {
   uint32_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len, nBits, n, a, bBits, b, res);
   }
@@ -285,7 +232,7 @@ Hacl_Bignum32_mod_exp_vartime(
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -319,8 +266,8 @@ Hacl_Bignum32_mod_exp_consttime(
 )
 {
   uint32_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u32(len, nBits, n, a, bBits, b, res);
   }
@@ -328,7 +275,7 @@ Hacl_Bignum32_mod_exp_consttime(
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -353,23 +300,23 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a,
   uint32_t one[len];
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc0;
   uint32_t m00 = m0 & m1;
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t bn_zero[len];
   memset(bn_zero, 0U, len * sizeof (uint32_t));
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -377,53 +324,48 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a,
   uint32_t mask1 = mask;
   uint32_t res10 = mask1;
   uint32_t m10 = res10;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t n2[len];
     memset(n2, 0U, len * sizeof (uint32_t));
-    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2);
+    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2);
     uint32_t c1;
-    if ((uint32_t)1U < len)
+    if (1U < len)
     {
-      uint32_t *a1 = n + (uint32_t)1U;
-      uint32_t *res1 = n2 + (uint32_t)1U;
+      uint32_t *a1 = n + 1U;
+      uint32_t *res1 = n2 + 1U;
       uint32_t c = c0;
-      for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U; i++)
+      for (uint32_t i = 0U; i < (len - 1U) / 4U; i++)
       {
-        uint32_t t1 = a1[(uint32_t)4U * i];
-        uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-        uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-        uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-        uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-        uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-        uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-        uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+        uint32_t t1 = a1[4U * i];
+        uint32_t *res_i0 = res1 + 4U * i;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+        uint32_t t10 = a1[4U * i + 1U];
+        uint32_t *res_i1 = res1 + 4U * i + 1U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+        uint32_t t11 = a1[4U * i + 2U];
+        uint32_t *res_i2 = res1 + 4U * i + 2U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+        uint32_t t12 = a1[4U * i + 3U];
+        uint32_t *res_i = res1 + 4U * i + 3U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
       }
-      for
-      (uint32_t
-        i = (len - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-        i
-        < len - (uint32_t)1U;
-        i++)
+      for (uint32_t i = (len - 1U) / 4U * 4U; i < len - 1U; i++)
       {
         uint32_t t1 = a1[i];
         uint32_t *res_i = res1 + i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i);
       }
       uint32_t c10 = c;
       c1 = c10;
@@ -432,20 +374,14 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a,
     {
       c1 = c0;
     }
-    KRML_HOST_IGNORE(c1);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len,
-      nBits,
-      n,
-      a,
-      (uint32_t)32U * len,
-      n2,
-      res);
+    KRML_MAYBE_UNUSED_VAR(c1);
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len, nBits, n, a, 32U * len, n2, res);
   }
   else
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 
@@ -477,7 +413,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -632,38 +568,33 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t n2[len1];
   memset(n2, 0U, len1 * sizeof (uint32_t));
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
   uint32_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint32_t *a1 = k1.n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t *a1 = k1.n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i);
     }
     uint32_t c10 = c;
     c1 = c10;
@@ -672,13 +603,13 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(len1,
     k1.n,
     k1.mu,
     k1.r2,
     a,
-    (uint32_t)32U * len1,
+    32U * len1,
     n2,
     res);
 }
@@ -702,36 +633,28 @@ Load a big-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint32_t *os = res2;
-    uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U);
+    uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U);
     uint32_t x = u;
     os[i] = x;
   }
@@ -751,36 +674,28 @@ Load a little-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++)
   {
     uint32_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)4U;
+    uint8_t *bj = tmp + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r1 = u;
     uint32_t x = r1;
@@ -797,14 +712,14 @@ Serialize a bignum into big-endian memory.
 */
 void Hacl_Bignum32_bn_to_bytes_be(uint32_t len, uint32_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store32_be(tmp + i * (uint32_t)4U, b[bnLen - i - (uint32_t)1U]);
+    store32_be(tmp + i * 4U, b[bnLen - i - 1U]);
   }
   memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t));
 }
@@ -817,14 +732,14 @@ Serialize a bignum into little-endian memory.
 */
 void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store32_le(tmp + i * (uint32_t)4U, b[i]);
+    store32_le(tmp + i * 4U, b[i]);
   }
   memcpy(res, tmp, len * sizeof (uint8_t));
 }
@@ -842,12 +757,12 @@ Returns 2^32 - 1 if a < b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b)
 {
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   return acc;
 }
@@ -859,8 +774,8 @@ Returns 2^32 - 1 if a = b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum32_eq_mask(uint32_t len, uint32_t *a, uint32_t *b)
 {
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/Hacl_Bignum4096.c b/src/Hacl_Bignum4096.c
index bf8fd6d2..3572db07 100644
--- a/src/Hacl_Bignum4096.c
+++ b/src/Hacl_Bignum4096.c
@@ -63,26 +63,26 @@ Write `a + b mod 2^4096` in `res`.
 */
 uint64_t Hacl_Bignum4096_add(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i););
   return c;
 }
@@ -96,26 +96,26 @@ Write `a - b mod 2^4096` in `res`.
 */
 uint64_t Hacl_Bignum4096_sub(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i););
   return c;
 }
@@ -132,53 +132,53 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum4096_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i););
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i););
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -198,54 +198,54 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum4096_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i););
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i););
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -262,7 +262,7 @@ Write `a * b` in `res`.
 void Hacl_Bignum4096_mul(uint64_t *a, uint64_t *b, uint64_t *res)
 {
   uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64((uint32_t)64U, a, b, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(64U, a, b, tmp, res);
 }
 
 /**
@@ -274,16 +274,16 @@ Write `a * a` in `res`.
 void Hacl_Bignum4096_sqr(uint64_t *a, uint64_t *res)
 {
   uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64((uint32_t)64U, a, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(64U, a, tmp, res);
 }
 
 static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
-  uint32_t i = nBits / (uint32_t)64U;
-  uint32_t j = nBits % (uint32_t)64U;
-  res[i] = res[i] | (uint64_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)8192U - nBits; i0++)
+  memset(res, 0U, 64U * sizeof (uint64_t));
+  uint32_t i = nBits / 64U;
+  uint32_t j = nBits % 64U;
+  res[i] = res[i] | 1ULL << j;
+  for (uint32_t i0 = 0U; i0 < 8192U - nBits; i0++)
   {
     Hacl_Bignum4096_add_mod(n, res, res, res);
   }
@@ -291,61 +291,61 @@ static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 
 static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      16U,
+      1U,
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i););
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)64U + i0;
-    uint64_t res_j = c[(uint32_t)64U + i0];
+    uint64_t *resb = c + 64U + i0;
+    uint64_t res_j = c[64U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(res, c + 64U, 64U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
-  uint64_t c1 = (uint64_t)0U;
+  uint64_t c1 = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i););
   uint64_t c10 = c1;
   uint64_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -353,50 +353,57 @@ static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *
   }
 }
 
+static inline void to(uint64_t *n, uint64_t nInv, uint64_t *r2, uint64_t *a, uint64_t *aM)
+{
+  uint64_t c[128U] = { 0U };
+  Hacl_Bignum4096_mul(a, r2, c);
+  reduction(n, nInv, c, aM);
+}
+
 static inline void from(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *a)
 {
   uint64_t tmp[128U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(tmp, aM, 64U * sizeof (uint64_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      16U,
+      1U,
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i););
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)64U + i0;
-    uint64_t res_j = c[(uint32_t)64U + i0];
+    uint64_t *resb = c + 64U + i0;
+    uint64_t res_j = c[64U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(res, c + 64U, 64U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
   uint64_t c1 = Hacl_Bignum4096_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (m & tmp[i]) | (~m & res[i]);
@@ -408,16 +415,14 @@ static inline void
 amont_mul(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *bM, uint64_t *resM)
 {
   uint64_t c[128U] = { 0U };
-  uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64((uint32_t)64U, aM, bM, tmp, c);
+  Hacl_Bignum4096_mul(aM, bM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
 static inline void amont_sqr(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *resM)
 {
   uint64_t c[128U] = { 0U };
-  uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64((uint32_t)64U, aM, tmp, c);
+  Hacl_Bignum4096_sqr(aM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -426,50 +431,9 @@ bn_slow_precomp(uint64_t *n, uint64_t mu, uint64_t *r2, uint64_t *a, uint64_t *r
 {
   uint64_t a_mod[64U] = { 0U };
   uint64_t a1[128U] = { 0U };
-  memcpy(a1, a, (uint32_t)128U * sizeof (uint64_t));
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
-  {
-    uint64_t qj = mu * a1[i0];
-    uint64_t *res_j0 = a1 + i0;
-    uint64_t c = (uint64_t)0U;
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i););
-    uint64_t r = c;
-    uint64_t c1 = r;
-    uint64_t *resb = a1 + (uint32_t)64U + i0;
-    uint64_t res_j = a1[(uint32_t)64U + i0];
-    c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb);
-  }
-  memcpy(a_mod, a1 + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t));
-  uint64_t c00 = c0;
-  uint64_t tmp[64U] = { 0U };
-  uint64_t c1 = Hacl_Bignum4096_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
-  {
-    uint64_t *os = a_mod;
-    uint64_t x = (m & tmp[i]) | (~m & a_mod[i]);
-    os[i] = x;
-  }
-  uint64_t c[128U] = { 0U };
-  Hacl_Bignum4096_mul(a_mod, r2, c);
-  reduction(n, mu, c, res);
+  memcpy(a1, a, 128U * sizeof (uint64_t));
+  areduction(n, mu, a1, a_mod);
+  to(n, mu, r2, a_mod, res);
 }
 
 /**
@@ -486,22 +450,21 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum4096_mod(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[64U] = { 0U };
-  memset(one, 0U, (uint32_t)64U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memset(one, 0U, 64U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   uint64_t is_valid_m = m0 & m1;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t r2[64U] = { 0U };
     precompr2(nBits, n, r2);
@@ -510,65 +473,65 @@ bool Hacl_Bignum4096_mod(uint64_t *n, uint64_t *a, uint64_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b)
 {
   uint64_t one[64U] = { 0U };
-  memset(one, 0U, (uint32_t)64U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memset(one, 0U, 64U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m10 = acc0;
   uint64_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t m1;
-  if (bBits < (uint32_t)64U * bLen)
+  if (bBits < 64U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
     uint64_t b2[bLen];
     memset(b2, 0U, bLen * sizeof (uint64_t));
-    uint32_t i0 = bBits / (uint32_t)64U;
-    uint32_t j = bBits % (uint32_t)64U;
-    b2[i0] = b2[i0] | (uint64_t)1U << j;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 64U;
+    uint32_t j = bBits % 64U;
+    b2[i0] = b2[i0] | 1ULL << j;
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    m1 = 0xFFFFFFFFFFFFFFFFULL;
   }
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t m = m1 & m2;
@@ -586,26 +549,24 @@ exp_vartime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[64U] = { 0U };
-    uint64_t c[128U] = { 0U };
-    Hacl_Bignum4096_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint64_t resM[64U] = { 0U };
     uint64_t ctx[128U] = { 0U };
-    memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
+    memcpy(ctx, n, 64U * sizeof (uint64_t));
+    memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -613,86 +574,76 @@ exp_vartime_precomp(
       uint64_t *ctx_n0 = ctx;
       amont_sqr(ctx_n0, mu, aM, aM);
     }
-    uint64_t tmp[128U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)64U * sizeof (uint64_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint64_t aM[64U] = { 0U };
-  uint64_t c[128U] = { 0U };
-  Hacl_Bignum4096_mul(a, r2, c);
-  reduction(n, mu, c, aM);
+  to(n, mu, r2, a, aM);
   uint64_t resM[64U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[128U] = { 0U };
-  memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(ctx, n, 64U * sizeof (uint64_t));
+  memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
   uint64_t table[1024U] = { 0U };
   uint64_t tmp[64U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)64U;
+  uint64_t *t1 = table + 64U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)64U;
+  uint64_t *ctx_r20 = ctx + 64U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(t1, aM, 64U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)64U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 64U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U;
+    memcpy(table + (2U * i + 2U) * 64U, tmp, 64U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 64U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 64U, tmp, 64U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
     uint32_t bits_l32 = (uint32_t)bits_c;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)64U;
-    memcpy(resM, (uint64_t *)a_bits_l, (uint32_t)64U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 64U;
+    memcpy(resM, (uint64_t *)a_bits_l, 64U * sizeof (uint64_t));
   }
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[64U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
     uint32_t bits_l32 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)64U;
-    memcpy(tmp0, (uint64_t *)a_bits_l, (uint32_t)64U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 64U;
+    memcpy(tmp0, (uint64_t *)a_bits_l, 64U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint64_t tmp1[128U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)64U * sizeof (uint64_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -706,30 +657,28 @@ exp_consttime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[64U] = { 0U };
-    uint64_t c[128U] = { 0U };
-    Hacl_Bignum4096_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint64_t resM[64U] = { 0U };
     uint64_t ctx[128U] = { 0U };
-    memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    memcpy(ctx, n, 64U * sizeof (uint64_t));
+    memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+      for (uint32_t i = 0U; i < 64U; i++)
       {
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -740,70 +689,62 @@ exp_consttime_precomp(
       sw = bit;
     }
     uint64_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+    for (uint32_t i = 0U; i < 64U; i++)
     {
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
-    uint64_t tmp[128U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)64U * sizeof (uint64_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint64_t aM[64U] = { 0U };
-  uint64_t c0[128U] = { 0U };
-  Hacl_Bignum4096_mul(a, r2, c0);
-  reduction(n, mu, c0, aM);
+  to(n, mu, r2, a, aM);
   uint64_t resM[64U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[128U] = { 0U };
-  memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(ctx, n, 64U * sizeof (uint64_t));
+  memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
   uint64_t table[1024U] = { 0U };
   uint64_t tmp[64U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)64U;
+  uint64_t *t1 = table + 64U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)64U;
+  uint64_t *ctx_r20 = ctx + 64U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(t1, aM, 64U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)64U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 64U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U;
+    memcpy(table + (2U * i + 2U) * 64U, tmp, 64U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 64U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 64U, tmp, 64U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint64_t *)table, (uint32_t)64U * sizeof (uint64_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+    memcpy(resM, (uint64_t *)table, 64U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)64U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 64U;
+      for (uint32_t i = 0U; i < 64U; i++)
       {
         uint64_t *os = resM;
         uint64_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -813,28 +754,28 @@ exp_consttime_precomp(
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[64U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)64U * sizeof (uint64_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 64U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)64U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 64U;
+      for (uint32_t i = 0U; i < 64U; i++)
       {
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -843,9 +784,7 @@ exp_consttime_precomp(
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint64_t tmp1[128U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)64U * sizeof (uint64_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -910,17 +849,16 @@ Hacl_Bignum4096_mod_exp_vartime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -953,17 +891,16 @@ Hacl_Bignum4096_mod_exp_consttime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -984,22 +921,22 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[64U] = { 0U };
-  memset(one, 0U, (uint32_t)64U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memset(one, 0U, 64U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc0;
   uint64_t m00 = m0 & m1;
   uint64_t bn_zero[64U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -1007,57 +944,56 @@ bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *r
   uint64_t mask1 = mask;
   uint64_t res10 = mask1;
   uint64_t m10 = res10;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t n2[64U] = { 0U };
-    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2);
-    uint64_t *a1 = n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2);
+    uint64_t *a1 = n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
     KRML_MAYBE_FOR15(i,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i););
+      0U,
+      15U,
+      1U,
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i););
     KRML_MAYBE_FOR3(i,
-      (uint32_t)60U,
-      (uint32_t)63U,
-      (uint32_t)1U,
+      60U,
+      63U,
+      1U,
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
     uint64_t c1 = c;
     uint64_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)4096U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 4096U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -1081,17 +1017,15 @@ Heap-allocate and initialize a montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum4096_mont_ctx_init(uint64_t *n)
 {
-  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint64_t));
-  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint64_t));
+  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC(64U, sizeof (uint64_t));
+  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC(64U, sizeof (uint64_t));
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)64U * sizeof (uint64_t));
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
+  memcpy(n11, n, 64U * sizeof (uint64_t));
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
   precompr2(nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
-  res = { .len = (uint32_t)64U, .n = n11, .mu = mu, .r2 = r21 };
+  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = 64U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *)KRML_HOST_MALLOC(sizeof (
@@ -1219,37 +1153,37 @@ Hacl_Bignum4096_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k;
   uint64_t n2[64U] = { 0U };
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
-  uint64_t *a1 = k1.n + (uint32_t)1U;
-  uint64_t *res1 = n2 + (uint32_t)1U;
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
+  uint64_t *a1 = k1.n + 1U;
+  uint64_t *res1 = n2 + 1U;
   uint64_t c = c0;
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t t1 = a1[(uint32_t)4U * i];
-    uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-    uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-    uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-    uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i););
+    0U,
+    15U,
+    1U,
+    uint64_t t1 = a1[4U * i];
+    uint64_t *res_i0 = res1 + 4U * i;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+    uint64_t t10 = a1[4U * i + 1U];
+    uint64_t *res_i1 = res1 + 4U * i + 1U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+    uint64_t t11 = a1[4U * i + 2U];
+    uint64_t *res_i2 = res1 + 4U * i + 2U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+    uint64_t t12 = a1[4U * i + 3U];
+    uint64_t *res_i = res1 + 4U * i + 3U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i););
   KRML_MAYBE_FOR3(i,
-    (uint32_t)60U,
-    (uint32_t)63U,
-    (uint32_t)1U,
+    60U,
+    63U,
+    1U,
     uint64_t t1 = a1[i];
     uint64_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
   uint64_t c1 = c;
   uint64_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)4096U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 4096U, n2, res);
 }
 
 
@@ -1271,36 +1205,28 @@ Load a big-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum4096_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res2;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -1320,36 +1246,28 @@ Load a little-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum4096_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++)
   {
     uint64_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)8U;
+    uint8_t *bj = tmp + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r1 = u;
     uint64_t x = r1;
@@ -1367,10 +1285,10 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum4096_bn_to_bytes_be(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 64U; i++)
   {
-    store64_be(res + i * (uint32_t)8U, b[(uint32_t)64U - i - (uint32_t)1U]);
+    store64_be(res + i * 8U, b[64U - i - 1U]);
   }
 }
 
@@ -1383,10 +1301,10 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum4096_bn_to_bytes_le(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 64U; i++)
   {
-    store64_le(res + i * (uint32_t)8U, b[i]);
+    store64_le(res + i * 8U, b[i]);
   }
 }
 
@@ -1403,12 +1321,12 @@ Returns 2^64 - 1 if a < b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum4096_lt_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   return acc;
 }
@@ -1420,8 +1338,8 @@ Returns 2^64 - 1 if a = b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum4096_eq_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/Hacl_Bignum4096_32.c b/src/Hacl_Bignum4096_32.c
index 2f8d70f1..1a8b361c 100644
--- a/src/Hacl_Bignum4096_32.c
+++ b/src/Hacl_Bignum4096_32.c
@@ -64,24 +64,24 @@ Write `a + b mod 2^4096` in `res`.
 */
 uint32_t Hacl_Bignum4096_32_add(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
   return c;
@@ -96,24 +96,24 @@ Write `a - b mod 2^4096` in `res`.
 */
 uint32_t Hacl_Bignum4096_32_sub(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
   return c;
@@ -131,51 +131,51 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum4096_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i);
   }
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
   uint32_t c1 = c;
   uint32_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -195,52 +195,52 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum4096_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i);
   }
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
   uint32_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint32_t c2 = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t c2 = 0U - c00;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -257,7 +257,7 @@ Write `a * b` in `res`.
 void Hacl_Bignum4096_32_mul(uint32_t *a, uint32_t *b, uint32_t *res)
 {
   uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32((uint32_t)128U, a, b, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(128U, a, b, tmp, res);
 }
 
 /**
@@ -269,16 +269,16 @@ Write `a * a` in `res`.
 void Hacl_Bignum4096_32_sqr(uint32_t *a, uint32_t *res)
 {
   uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32((uint32_t)128U, a, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(128U, a, tmp, res);
 }
 
 static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
-  uint32_t i = nBits / (uint32_t)32U;
-  uint32_t j = nBits % (uint32_t)32U;
-  res[i] = res[i] | (uint32_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)8192U - nBits; i0++)
+  memset(res, 0U, 128U * sizeof (uint32_t));
+  uint32_t i = nBits / 32U;
+  uint32_t j = nBits % 32U;
+  res[i] = res[i] | 1U << j;
+  for (uint32_t i0 = 0U; i0 < 8192U - nBits; i0++)
   {
     Hacl_Bignum4096_32_add_mod(n, res, res, res);
   }
@@ -286,59 +286,59 @@ static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 
 static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < 128U; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)128U + i0;
-    uint32_t res_j = c[(uint32_t)128U + i0];
+    uint32_t *resb = c + 128U + i0;
+    uint32_t res_j = c[128U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(res, c + 128U, 128U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
-  uint32_t c1 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c1 = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i);
   }
   uint32_t c10 = c1;
   uint32_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -346,49 +346,56 @@ static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *
   }
 }
 
+static inline void to(uint32_t *n, uint32_t nInv, uint32_t *r2, uint32_t *a, uint32_t *aM)
+{
+  uint32_t c[256U] = { 0U };
+  Hacl_Bignum4096_32_mul(a, r2, c);
+  reduction(n, nInv, c, aM);
+}
+
 static inline void from(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *a)
 {
   uint32_t tmp[256U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(tmp, aM, 128U * sizeof (uint32_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < 128U; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)128U + i0;
-    uint32_t res_j = c[(uint32_t)128U + i0];
+    uint32_t *resb = c + 128U + i0;
+    uint32_t res_j = c[128U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(res, c + 128U, 128U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
   uint32_t c1 = Hacl_Bignum4096_32_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (m & tmp[i]) | (~m & res[i]);
@@ -400,16 +407,14 @@ static inline void
 amont_mul(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *bM, uint32_t *resM)
 {
   uint32_t c[256U] = { 0U };
-  uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32((uint32_t)128U, aM, bM, tmp, c);
+  Hacl_Bignum4096_32_mul(aM, bM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
 static inline void amont_sqr(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *resM)
 {
   uint32_t c[256U] = { 0U };
-  uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32((uint32_t)128U, aM, tmp, c);
+  Hacl_Bignum4096_32_sqr(aM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -418,49 +423,9 @@ bn_slow_precomp(uint32_t *n, uint32_t mu, uint32_t *r2, uint32_t *a, uint32_t *r
 {
   uint32_t a_mod[128U] = { 0U };
   uint32_t a1[256U] = { 0U };
-  memcpy(a1, a, (uint32_t)256U * sizeof (uint32_t));
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++)
-  {
-    uint32_t qj = mu * a1[i0];
-    uint32_t *res_j0 = a1 + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
-    {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i);
-    }
-    uint32_t r = c;
-    uint32_t c1 = r;
-    uint32_t *resb = a1 + (uint32_t)128U + i0;
-    uint32_t res_j = a1[(uint32_t)128U + i0];
-    c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c1, res_j, resb);
-  }
-  memcpy(a_mod, a1 + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t));
-  uint32_t c00 = c0;
-  uint32_t tmp[128U] = { 0U };
-  uint32_t c1 = Hacl_Bignum4096_32_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
-  {
-    uint32_t *os = a_mod;
-    uint32_t x = (m & tmp[i]) | (~m & a_mod[i]);
-    os[i] = x;
-  }
-  uint32_t c[256U] = { 0U };
-  Hacl_Bignum4096_32_mul(a_mod, r2, c);
-  reduction(n, mu, c, res);
+  memcpy(a1, a, 256U * sizeof (uint32_t));
+  areduction(n, mu, a1, a_mod);
+  to(n, mu, r2, a_mod, res);
 }
 
 /**
@@ -477,21 +442,21 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum4096_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[128U] = { 0U };
-  memset(one, 0U, (uint32_t)128U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  memset(one, 0U, 128U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc;
   uint32_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t r2[128U] = { 0U };
     precompr2(nBits, n, r2);
@@ -500,65 +465,65 @@ bool Hacl_Bignum4096_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b)
 {
   uint32_t one[128U] = { 0U };
-  memset(one, 0U, (uint32_t)128U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  memset(one, 0U, 128U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m10 = acc0;
   uint32_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t m1;
-  if (bBits < (uint32_t)32U * bLen)
+  if (bBits < 32U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), bLen);
     uint32_t b2[bLen];
     memset(b2, 0U, bLen * sizeof (uint32_t));
-    uint32_t i0 = bBits / (uint32_t)32U;
-    uint32_t j = bBits % (uint32_t)32U;
-    b2[i0] = b2[i0] | (uint32_t)1U << j;
-    uint32_t acc = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 32U;
+    uint32_t j = bBits % 32U;
+    b2[i0] = b2[i0] | 1U << j;
+    uint32_t acc = 0U;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]);
       uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
     }
     uint32_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint32_t)0xFFFFFFFFU;
+    m1 = 0xFFFFFFFFU;
   }
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t m = m1 & m2;
@@ -576,26 +541,24 @@ exp_vartime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[128U] = { 0U };
-    uint32_t c[256U] = { 0U };
-    Hacl_Bignum4096_32_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint32_t resM[128U] = { 0U };
     uint32_t ctx[256U] = { 0U };
-    memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
+    memcpy(ctx, n, 128U * sizeof (uint32_t));
+    memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -603,86 +566,76 @@ exp_vartime_precomp(
       uint32_t *ctx_n0 = ctx;
       amont_sqr(ctx_n0, mu, aM, aM);
     }
-    uint32_t tmp[256U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)128U * sizeof (uint32_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint32_t aM[128U] = { 0U };
-  uint32_t c[256U] = { 0U };
-  Hacl_Bignum4096_32_mul(a, r2, c);
-  reduction(n, mu, c, aM);
+  to(n, mu, r2, a, aM);
   uint32_t resM[128U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[256U] = { 0U };
-  memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(ctx, n, 128U * sizeof (uint32_t));
+  memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
   uint32_t table[2048U] = { 0U };
   uint32_t tmp[128U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)128U;
+  uint32_t *t1 = table + 128U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)128U;
+  uint32_t *ctx_r20 = ctx + 128U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(t1, aM, 128U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)128U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 128U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U;
+    memcpy(table + (2U * i + 2U) * 128U, tmp, 128U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 128U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 128U, tmp, 128U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
     uint32_t bits_l32 = bits_c;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)128U;
-    memcpy(resM, (uint32_t *)a_bits_l, (uint32_t)128U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 128U;
+    memcpy(resM, (uint32_t *)a_bits_l, 128U * sizeof (uint32_t));
   }
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[128U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
     uint32_t bits_l32 = bits_l;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)128U;
-    memcpy(tmp0, (uint32_t *)a_bits_l, (uint32_t)128U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 128U;
+    memcpy(tmp0, (uint32_t *)a_bits_l, 128U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint32_t tmp1[256U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)128U * sizeof (uint32_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -696,30 +649,28 @@ exp_consttime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[128U] = { 0U };
-    uint32_t c[256U] = { 0U };
-    Hacl_Bignum4096_32_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint32_t resM[128U] = { 0U };
     uint32_t ctx[256U] = { 0U };
-    memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    memcpy(ctx, n, 128U * sizeof (uint32_t));
+    memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+      for (uint32_t i = 0U; i < 128U; i++)
       {
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -730,70 +681,62 @@ exp_consttime_precomp(
       sw = bit;
     }
     uint32_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+    for (uint32_t i = 0U; i < 128U; i++)
     {
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
-    uint32_t tmp[256U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)128U * sizeof (uint32_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint32_t aM[128U] = { 0U };
-  uint32_t c0[256U] = { 0U };
-  Hacl_Bignum4096_32_mul(a, r2, c0);
-  reduction(n, mu, c0, aM);
+  to(n, mu, r2, a, aM);
   uint32_t resM[128U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[256U] = { 0U };
-  memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(ctx, n, 128U * sizeof (uint32_t));
+  memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
   uint32_t table[2048U] = { 0U };
   uint32_t tmp[128U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)128U;
+  uint32_t *t1 = table + 128U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)128U;
+  uint32_t *ctx_r20 = ctx + 128U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(t1, aM, 128U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)128U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 128U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U;
+    memcpy(table + (2U * i + 2U) * 128U, tmp, 128U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 128U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 128U, tmp, 128U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint32_t *)table, (uint32_t)128U * sizeof (uint32_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+    memcpy(resM, (uint32_t *)table, 128U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)128U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 128U;
+      for (uint32_t i = 0U; i < 128U; i++)
       {
         uint32_t *os = resM;
         uint32_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -803,28 +746,28 @@ exp_consttime_precomp(
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[128U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint32_t *)table, (uint32_t)128U * sizeof (uint32_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
+    memcpy(tmp0, (uint32_t *)table, 128U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)128U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 128U;
+      for (uint32_t i = 0U; i < 128U; i++)
       {
         uint32_t *os = tmp0;
         uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -833,9 +776,7 @@ exp_consttime_precomp(
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint32_t tmp1[256U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)128U * sizeof (uint32_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -900,16 +841,16 @@ Hacl_Bignum4096_32_mod_exp_vartime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -942,16 +883,16 @@ Hacl_Bignum4096_32_mod_exp_consttime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -972,22 +913,22 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[128U] = { 0U };
-  memset(one, 0U, (uint32_t)128U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  memset(one, 0U, 128U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc0;
   uint32_t m00 = m0 & m1;
   uint32_t bn_zero[128U] = { 0U };
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -995,55 +936,55 @@ bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t
   uint32_t mask1 = mask;
   uint32_t res10 = mask1;
   uint32_t m10 = res10;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t n2[128U] = { 0U };
-    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2);
-    uint32_t *a1 = n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2);
+    uint32_t *a1 = n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)31U; i++)
+    for (uint32_t i = 0U; i < 31U; i++)
     {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
     KRML_MAYBE_FOR3(i,
-      (uint32_t)124U,
-      (uint32_t)127U,
-      (uint32_t)1U,
+      124U,
+      127U,
+      1U,
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
     uint32_t c1 = c;
     uint32_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)4096U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 4096U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 
@@ -1067,16 +1008,16 @@ Heap-allocate and initialize a montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum4096_32_mont_ctx_init(uint32_t *n)
 {
-  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint32_t));
-  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint32_t));
+  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC(128U, sizeof (uint32_t));
+  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC(128U, sizeof (uint32_t));
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)128U * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
+  memcpy(n11, n, 128U * sizeof (uint32_t));
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
   precompr2(nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
-  res = { .len = (uint32_t)128U, .n = n11, .mu = mu, .r2 = r21 };
+  res = { .len = 128U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *)KRML_HOST_MALLOC(sizeof (
@@ -1204,36 +1145,36 @@ Hacl_Bignum4096_32_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k;
   uint32_t n2[128U] = { 0U };
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
-  uint32_t *a1 = k1.n + (uint32_t)1U;
-  uint32_t *res1 = n2 + (uint32_t)1U;
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
+  uint32_t *a1 = k1.n + 1U;
+  uint32_t *res1 = n2 + 1U;
   uint32_t c = c0;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)31U; i++)
+  for (uint32_t i = 0U; i < 31U; i++)
   {
-    uint32_t t1 = a1[(uint32_t)4U * i];
-    uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-    uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-    uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-    uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+    uint32_t t1 = a1[4U * i];
+    uint32_t *res_i0 = res1 + 4U * i;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+    uint32_t t10 = a1[4U * i + 1U];
+    uint32_t *res_i1 = res1 + 4U * i + 1U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+    uint32_t t11 = a1[4U * i + 2U];
+    uint32_t *res_i2 = res1 + 4U * i + 2U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+    uint32_t t12 = a1[4U * i + 3U];
+    uint32_t *res_i = res1 + 4U * i + 3U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
   }
   KRML_MAYBE_FOR3(i,
-    (uint32_t)124U,
-    (uint32_t)127U,
-    (uint32_t)1U,
+    124U,
+    127U,
+    1U,
     uint32_t t1 = a1[i];
     uint32_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
   uint32_t c1 = c;
   uint32_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)4096U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 4096U, n2, res);
 }
 
 
@@ -1255,36 +1196,28 @@ Load a bid-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint32_t *os = res2;
-    uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U);
+    uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U);
     uint32_t x = u;
     os[i] = x;
   }
@@ -1304,36 +1237,28 @@ Load a little-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++)
   {
     uint32_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)4U;
+    uint8_t *bj = tmp + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r1 = u;
     uint32_t x = r1;
@@ -1351,10 +1276,10 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum4096_32_bn_to_bytes_be(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 128U; i++)
   {
-    store32_be(res + i * (uint32_t)4U, b[(uint32_t)128U - i - (uint32_t)1U]);
+    store32_be(res + i * 4U, b[128U - i - 1U]);
   }
 }
 
@@ -1367,10 +1292,10 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum4096_32_bn_to_bytes_le(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 128U; i++)
   {
-    store32_le(res + i * (uint32_t)4U, b[i]);
+    store32_le(res + i * 4U, b[i]);
   }
 }
 
@@ -1387,12 +1312,12 @@ Returns 2^32 - 1 if a < b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum4096_32_lt_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   return acc;
 }
@@ -1404,8 +1329,8 @@ Returns 2^32 - 1 if a = b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum4096_32_eq_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/Hacl_Bignum64.c b/src/Hacl_Bignum64.c
index 7300a993..f8f5bb6f 100644
--- a/src/Hacl_Bignum64.c
+++ b/src/Hacl_Bignum64.c
@@ -104,9 +104,9 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum64_mul(uint32_t len, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, b, tmp, res);
 }
 
@@ -118,9 +118,9 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum64_sqr(uint32_t len, uint64_t *a, uint64_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, a, tmp, res);
 }
 
@@ -141,61 +141,8 @@ bn_slow_precomp(
   uint64_t a1[len + len];
   memset(a1, 0U, (len + len) * sizeof (uint64_t));
   memcpy(a1, a, (len + len) * sizeof (uint64_t));
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
-  {
-    uint64_t qj = mu * a1[i0];
-    uint64_t *res_j0 = a1 + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
-    {
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
-    }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
-    {
-      uint64_t a_i = n[i];
-      uint64_t *res_i = res_j0 + i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i);
-    }
-    uint64_t r = c;
-    uint64_t c1 = r;
-    uint64_t *resb = a1 + len + i0;
-    uint64_t res_j = a1[len + i0];
-    c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb);
-  }
-  memcpy(a_mod, a1 + len, (len + len - len) * sizeof (uint64_t));
-  uint64_t c00 = c0;
-  KRML_CHECK_SIZE(sizeof (uint64_t), len);
-  uint64_t tmp0[len];
-  memset(tmp0, 0U, len * sizeof (uint64_t));
-  uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len, a_mod, n, tmp0);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
-  {
-    uint64_t *os = a_mod;
-    uint64_t x = (m & tmp0[i]) | (~m & a_mod[i]);
-    os[i] = x;
-  }
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-  uint64_t c[len + len];
-  memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a_mod, r2, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, res);
+  Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64(len, n, mu, a1, a_mod);
+  Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a_mod, res);
 }
 
 /**
@@ -215,20 +162,20 @@ bool Hacl_Bignum64_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res)
   uint64_t one[len];
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   uint64_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t r2[len];
@@ -241,7 +188,7 @@ bool Hacl_Bignum64_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res)
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -275,8 +222,8 @@ Hacl_Bignum64_mod_exp_vartime(
 )
 {
   uint64_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len, nBits, n, a, bBits, b, res);
   }
@@ -284,7 +231,7 @@ Hacl_Bignum64_mod_exp_vartime(
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -318,8 +265,8 @@ Hacl_Bignum64_mod_exp_consttime(
 )
 {
   uint64_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u64(len, nBits, n, a, bBits, b, res);
   }
@@ -327,7 +274,7 @@ Hacl_Bignum64_mod_exp_consttime(
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -352,23 +299,23 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a,
   uint64_t one[len];
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc0;
   uint64_t m00 = m0 & m1;
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t bn_zero[len];
   memset(bn_zero, 0U, len * sizeof (uint64_t));
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -376,53 +323,48 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a,
   uint64_t mask1 = mask;
   uint64_t res10 = mask1;
   uint64_t m10 = res10;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t n2[len];
     memset(n2, 0U, len * sizeof (uint64_t));
-    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2);
+    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2);
     uint64_t c1;
-    if ((uint32_t)1U < len)
+    if (1U < len)
     {
-      uint64_t *a1 = n + (uint32_t)1U;
-      uint64_t *res1 = n2 + (uint32_t)1U;
+      uint64_t *a1 = n + 1U;
+      uint64_t *res1 = n2 + 1U;
       uint64_t c = c0;
-      for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U; i++)
+      for (uint32_t i = 0U; i < (len - 1U) / 4U; i++)
       {
-        uint64_t t1 = a1[(uint32_t)4U * i];
-        uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-        uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-        uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-        uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-        uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-        uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-        uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+        uint64_t t1 = a1[4U * i];
+        uint64_t *res_i0 = res1 + 4U * i;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+        uint64_t t10 = a1[4U * i + 1U];
+        uint64_t *res_i1 = res1 + 4U * i + 1U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+        uint64_t t11 = a1[4U * i + 2U];
+        uint64_t *res_i2 = res1 + 4U * i + 2U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+        uint64_t t12 = a1[4U * i + 3U];
+        uint64_t *res_i = res1 + 4U * i + 3U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
       }
-      for
-      (uint32_t
-        i = (len - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-        i
-        < len - (uint32_t)1U;
-        i++)
+      for (uint32_t i = (len - 1U) / 4U * 4U; i < len - 1U; i++)
       {
         uint64_t t1 = a1[i];
         uint64_t *res_i = res1 + i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
       }
       uint64_t c10 = c;
       c1 = c10;
@@ -431,20 +373,14 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a,
     {
       c1 = c0;
     }
-    KRML_HOST_IGNORE(c1);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len,
-      nBits,
-      n,
-      a,
-      (uint32_t)64U * len,
-      n2,
-      res);
+    KRML_MAYBE_UNUSED_VAR(c1);
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len, nBits, n, a, 64U * len, n2, res);
   }
   else
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -476,7 +412,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint64_t));
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -631,38 +567,33 @@ Hacl_Bignum64_mod_inv_prime_vartime_precomp(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t n2[len1];
   memset(n2, 0U, len1 * sizeof (uint64_t));
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
   uint64_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint64_t *a1 = k1.n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t *a1 = k1.n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c10 = c;
     c1 = c10;
@@ -671,13 +602,13 @@ Hacl_Bignum64_mod_inv_prime_vartime_precomp(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(len1,
     k1.n,
     k1.mu,
     k1.r2,
     a,
-    (uint32_t)64U * len1,
+    64U * len1,
     n2,
     res);
 }
@@ -701,36 +632,28 @@ Load a big-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum64_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res2;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -750,36 +673,28 @@ Load a little-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum64_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++)
   {
     uint64_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)8U;
+    uint8_t *bj = tmp + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r1 = u;
     uint64_t x = r1;
@@ -796,14 +711,14 @@ Serialize a bignum into big-endian memory.
 */
 void Hacl_Bignum64_bn_to_bytes_be(uint32_t len, uint64_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store64_be(tmp + i * (uint32_t)8U, b[bnLen - i - (uint32_t)1U]);
+    store64_be(tmp + i * 8U, b[bnLen - i - 1U]);
   }
   memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t));
 }
@@ -816,14 +731,14 @@ Serialize a bignum into little-endian memory.
 */
 void Hacl_Bignum64_bn_to_bytes_le(uint32_t len, uint64_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store64_le(tmp + i * (uint32_t)8U, b[i]);
+    store64_le(tmp + i * 8U, b[i]);
   }
   memcpy(res, tmp, len * sizeof (uint8_t));
 }
@@ -841,12 +756,12 @@ Returns 2^64 - 1 if a < b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum64_lt_mask(uint32_t len, uint64_t *a, uint64_t *b)
 {
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   return acc;
 }
@@ -858,8 +773,8 @@ Returns 2^64 - 1 if a = b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum64_eq_mask(uint32_t len, uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/Hacl_Chacha20.c b/src/Hacl_Chacha20.c
index 8966e19e..38a5c373 100644
--- a/src/Hacl_Chacha20.c
+++ b/src/Hacl_Chacha20.c
@@ -28,7 +28,7 @@
 const
 uint32_t
 Hacl_Impl_Chacha20_Vec_chacha20_constants[4U] =
-  { (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
+  { 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U };
 
 static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
 {
@@ -37,7 +37,7 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std0 = st[d];
   uint32_t sta10 = sta + stb0;
   uint32_t std10 = std0 ^ sta10;
-  uint32_t std2 = std10 << (uint32_t)16U | std10 >> (uint32_t)16U;
+  uint32_t std2 = std10 << 16U | std10 >> 16U;
   st[a] = sta10;
   st[d] = std2;
   uint32_t sta0 = st[c];
@@ -45,7 +45,7 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std3 = st[b];
   uint32_t sta11 = sta0 + stb1;
   uint32_t std11 = std3 ^ sta11;
-  uint32_t std20 = std11 << (uint32_t)12U | std11 >> (uint32_t)20U;
+  uint32_t std20 = std11 << 12U | std11 >> 20U;
   st[c] = sta11;
   st[b] = std20;
   uint32_t sta2 = st[a];
@@ -53,7 +53,7 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std4 = st[d];
   uint32_t sta12 = sta2 + stb2;
   uint32_t std12 = std4 ^ sta12;
-  uint32_t std21 = std12 << (uint32_t)8U | std12 >> (uint32_t)24U;
+  uint32_t std21 = std12 << 8U | std12 >> 24U;
   st[a] = sta12;
   st[d] = std21;
   uint32_t sta3 = st[c];
@@ -61,21 +61,21 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std = st[b];
   uint32_t sta1 = sta3 + stb;
   uint32_t std1 = std ^ sta1;
-  uint32_t std22 = std1 << (uint32_t)7U | std1 >> (uint32_t)25U;
+  uint32_t std22 = std1 << 7U | std1 >> 25U;
   st[c] = sta1;
   st[b] = std22;
 }
 
 static inline void double_round(uint32_t *st)
 {
-  quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U);
-  quarter_round(st, (uint32_t)1U, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U);
-  quarter_round(st, (uint32_t)2U, (uint32_t)6U, (uint32_t)10U, (uint32_t)14U);
-  quarter_round(st, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U, (uint32_t)15U);
-  quarter_round(st, (uint32_t)0U, (uint32_t)5U, (uint32_t)10U, (uint32_t)15U);
-  quarter_round(st, (uint32_t)1U, (uint32_t)6U, (uint32_t)11U, (uint32_t)12U);
-  quarter_round(st, (uint32_t)2U, (uint32_t)7U, (uint32_t)8U, (uint32_t)13U);
-  quarter_round(st, (uint32_t)3U, (uint32_t)4U, (uint32_t)9U, (uint32_t)14U);
+  quarter_round(st, 0U, 4U, 8U, 12U);
+  quarter_round(st, 1U, 5U, 9U, 13U);
+  quarter_round(st, 2U, 6U, 10U, 14U);
+  quarter_round(st, 3U, 7U, 11U, 15U);
+  quarter_round(st, 0U, 5U, 10U, 15U);
+  quarter_round(st, 1U, 6U, 11U, 12U);
+  quarter_round(st, 2U, 7U, 8U, 13U);
+  quarter_round(st, 3U, 4U, 9U, 14U);
 }
 
 static inline void rounds(uint32_t *st)
@@ -94,14 +94,14 @@ static inline void rounds(uint32_t *st)
 
 static inline void chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t));
+  memcpy(k, ctx, 16U * sizeof (uint32_t));
   uint32_t ctr_u32 = ctr;
   k[12U] = k[12U] + ctr_u32;
   rounds(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = k;
     uint32_t x = k[i] + ctx[i];
     os[i] = x;);
@@ -110,35 +110,34 @@ static inline void chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 
 static const
 uint32_t
-chacha20_constants[4U] =
-  { (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
+chacha20_constants[4U] = { 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U };
 
 void Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n, uint32_t ctr)
 {
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx;
     uint32_t x = chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -151,27 +150,23 @@ static void chacha20_encrypt_block(uint32_t *ctx, uint8_t *out, uint32_t incr, u
   chacha20_core(k, ctx, incr);
   uint32_t bl[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = bl;
-    uint8_t *bj = text + i * (uint32_t)4U;
+    uint8_t *bj = text + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = bl;
     uint32_t x = bl[i] ^ k[i];
     os[i] = x;);
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    store32_le(out + i * (uint32_t)4U, bl[i]););
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(out + i * 4U, bl[i]););
 }
 
 static inline void
@@ -186,16 +181,16 @@ chacha20_encrypt_last(uint32_t *ctx, uint32_t len, uint8_t *out, uint32_t incr,
 void
 Hacl_Impl_Chacha20_chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text)
 {
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    chacha20_encrypt_block(ctx, out + i * (uint32_t)64U, i, text + i * (uint32_t)64U);
+    chacha20_encrypt_block(ctx, out + i * 64U, i, text + i * 64U);
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    chacha20_encrypt_last(ctx, rem, out + nb * (uint32_t)64U, nb, text + nb * (uint32_t)64U);
+    chacha20_encrypt_last(ctx, rem, out + nb * 64U, nb, text + nb * 64U);
   }
 }
 
diff --git a/src/Hacl_Chacha20Poly1305_128.c b/src/Hacl_Chacha20Poly1305_128.c
index 4cf2eae9..297f1c8f 100644
--- a/src/Hacl_Chacha20Poly1305_128.c
+++ b/src/Hacl_Chacha20Poly1305_128.c
@@ -32,56 +32,51 @@
 static inline void
 poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t *text)
 {
-  uint32_t n = len / (uint32_t)16U;
-  uint32_t r = len % (uint32_t)16U;
+  uint32_t n = len / 16U;
+  uint32_t r = len % 16U;
   uint8_t *blocks = text;
-  uint8_t *rem = text + n * (uint32_t)16U;
-  Lib_IntVector_Intrinsics_vec128 *pre0 = ctx + (uint32_t)5U;
+  uint8_t *rem = text + n * 16U;
+  Lib_IntVector_Intrinsics_vec128 *pre0 = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc0 = ctx;
-  uint32_t sz_block = (uint32_t)32U;
-  uint32_t len0 = n * (uint32_t)16U / sz_block * sz_block;
+  uint32_t sz_block = 32U;
+  uint32_t len0 = n * 16U / sz_block * sz_block;
   uint8_t *t00 = blocks;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)32U;
+    uint32_t bs = 32U;
     uint8_t *text0 = t00;
     Hacl_Impl_Poly1305_Field32xN_128_load_acc2(acc0, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t00 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
       Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(block);
-      Lib_IntVector_Intrinsics_vec128
-      b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + (uint32_t)16U);
+      Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + 16U);
       Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       f00 =
         Lib_IntVector_Intrinsics_vec128_and(lo,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f15 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)26U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 26U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f25 =
-        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)52U),
+        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 52U),
           Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi,
-              Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-            (uint32_t)12U));
+              Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+            12U));
       Lib_IntVector_Intrinsics_vec128
       f30 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi,
-            (uint32_t)14U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-      Lib_IntVector_Intrinsics_vec128
-      f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U);
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 14U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+      Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 40U);
       Lib_IntVector_Intrinsics_vec128 f0 = f00;
       Lib_IntVector_Intrinsics_vec128 f1 = f15;
       Lib_IntVector_Intrinsics_vec128 f2 = f25;
@@ -92,12 +87,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
       e[2U] = f2;
       e[3U] = f3;
       e[4U] = f41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
       Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec128 *rn = pre0 + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec128 *rn5 = pre0 + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec128 *rn = pre0 + 10U;
+      Lib_IntVector_Intrinsics_vec128 *rn5 = pre0 + 15U;
       Lib_IntVector_Intrinsics_vec128 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec128 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec128 r2 = rn[2U];
@@ -202,37 +197,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
       Lib_IntVector_Intrinsics_vec128 t2 = a24;
       Lib_IntVector_Intrinsics_vec128 t3 = a34;
       Lib_IntVector_Intrinsics_vec128 t4 = a44;
-      Lib_IntVector_Intrinsics_vec128
-      mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec128
-      z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec128
-      z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
       Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec128
-      z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec128
-      z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec128 o00 = x02;
@@ -268,43 +254,39 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     }
     Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(acc0, pre0);
   }
-  uint32_t len1 = n * (uint32_t)16U - len0;
+  uint32_t len1 = n * 16U - len0;
   uint8_t *t10 = blocks + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem1 = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem1 = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t10 + i * (uint32_t)16U;
+    uint8_t *block = t10 + i * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -315,12 +297,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
     Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
     Lib_IntVector_Intrinsics_vec128 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec128 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r1[2U];
@@ -435,37 +417,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -479,41 +452,37 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     acc0[3U] = o3;
     acc0[4U] = o4;
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *last = t10 + nb * (uint32_t)16U;
+    uint8_t *last = t10 + nb * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem1 * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -524,12 +493,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem1 * 8U % 26U;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
-    Lib_IntVector_Intrinsics_vec128 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
-    e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec128 fi = e[rem1 * 8U / 26U];
+    e[rem1 * 8U / 26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
     Lib_IntVector_Intrinsics_vec128 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec128 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r1[2U];
@@ -644,37 +613,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -690,40 +650,36 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
   }
   uint8_t tmp[16U] = { 0U };
   memcpy(tmp, rem, r * sizeof (uint8_t));
-  if (r > (uint32_t)0U)
+  if (r > 0U)
   {
-    Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
     Lib_IntVector_Intrinsics_vec128 *acc = ctx;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -734,12 +690,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
     Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
     Lib_IntVector_Intrinsics_vec128 *r1 = pre;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec128 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r1[2U];
@@ -854,37 +810,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -914,48 +861,44 @@ poly1305_do_128(
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[25U] KRML_POST_ALIGN(16) = { 0U };
   uint8_t block[16U] = { 0U };
   Hacl_Poly1305_128_poly1305_init(ctx, k);
-  if (aadlen != (uint32_t)0U)
+  if (aadlen != 0U)
   {
     poly1305_padded_128(ctx, aadlen, aad);
   }
-  if (mlen != (uint32_t)0U)
+  if (mlen != 0U)
   {
     poly1305_padded_128(ctx, mlen, m);
   }
   store64_le(block, (uint64_t)aadlen);
-  store64_le(block + (uint32_t)8U, (uint64_t)mlen);
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  store64_le(block + 8U, (uint64_t)mlen);
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
   uint64_t u0 = load64_le(block);
   uint64_t lo = u0;
-  uint64_t u = load64_le(block + (uint32_t)8U);
+  uint64_t u = load64_le(block + 8U);
   uint64_t hi = u;
   Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
   Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
   Lib_IntVector_Intrinsics_vec128
   f010 =
     Lib_IntVector_Intrinsics_vec128_and(f0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f110 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
   Lib_IntVector_Intrinsics_vec128 f01 = f010;
   Lib_IntVector_Intrinsics_vec128 f111 = f110;
   Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -966,12 +909,12 @@ poly1305_do_128(
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
   Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
   Lib_IntVector_Intrinsics_vec128 *r = pre;
-  Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
   Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -1086,37 +1029,28 @@ poly1305_do_128(
   Lib_IntVector_Intrinsics_vec128 t2 = a26;
   Lib_IntVector_Intrinsics_vec128 t3 = a36;
   Lib_IntVector_Intrinsics_vec128 t4 = a46;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -1160,9 +1094,9 @@ Hacl_Chacha20Poly1305_128_aead_encrypt(
   uint8_t *mac
 )
 {
-  Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, cipher, m, k, n, (uint32_t)1U);
+  Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, cipher, m, k, n, 1U);
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
+  Hacl_Chacha20_Vec128_chacha20_encrypt_128(64U, tmp, tmp, k, n, 0U);
   uint8_t *key = tmp;
   poly1305_do_128(key, aadlen, aad, mlen, cipher, mac);
 }
@@ -1202,22 +1136,22 @@ Hacl_Chacha20Poly1305_128_aead_decrypt(
 {
   uint8_t computed_mac[16U] = { 0U };
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
+  Hacl_Chacha20_Vec128_chacha20_encrypt_128(64U, tmp, tmp, k, n, 0U);
   uint8_t *key = tmp;
   poly1305_do_128(key, aadlen, aad, mlen, cipher, computed_mac);
-  uint8_t res = (uint8_t)255U;
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
-    res = uu____0 & res;);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
-    Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, m, cipher, k, n, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, m, cipher, k, n, 1U);
+    return 0U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_Chacha20Poly1305_256.c b/src/Hacl_Chacha20Poly1305_256.c
index c3dfec03..6a278daa 100644
--- a/src/Hacl_Chacha20Poly1305_256.c
+++ b/src/Hacl_Chacha20Poly1305_256.c
@@ -32,58 +32,52 @@
 static inline void
 poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t *text)
 {
-  uint32_t n = len / (uint32_t)16U;
-  uint32_t r = len % (uint32_t)16U;
+  uint32_t n = len / 16U;
+  uint32_t r = len % 16U;
   uint8_t *blocks = text;
-  uint8_t *rem = text + n * (uint32_t)16U;
-  Lib_IntVector_Intrinsics_vec256 *pre0 = ctx + (uint32_t)5U;
+  uint8_t *rem = text + n * 16U;
+  Lib_IntVector_Intrinsics_vec256 *pre0 = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc0 = ctx;
-  uint32_t sz_block = (uint32_t)64U;
-  uint32_t len0 = n * (uint32_t)16U / sz_block * sz_block;
+  uint32_t sz_block = 64U;
+  uint32_t len0 = n * 16U / sz_block * sz_block;
   uint8_t *t00 = blocks;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)64U;
+    uint32_t bs = 64U;
     uint8_t *text0 = t00;
     Hacl_Impl_Poly1305_Field32xN_256_load_acc4(acc0, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t00 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
       Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block);
+      Lib_IntVector_Intrinsics_vec256 hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + 32U);
       Lib_IntVector_Intrinsics_vec256
-      hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U);
-      Lib_IntVector_Intrinsics_vec256
-      mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
+      mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
       Lib_IntVector_Intrinsics_vec256
       m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
       Lib_IntVector_Intrinsics_vec256
       m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
-      Lib_IntVector_Intrinsics_vec256
-      m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U);
-      Lib_IntVector_Intrinsics_vec256
-      m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U);
+      Lib_IntVector_Intrinsics_vec256 m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, 48U);
+      Lib_IntVector_Intrinsics_vec256 m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, 48U);
       Lib_IntVector_Intrinsics_vec256
       m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t010 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t30 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
-      Lib_IntVector_Intrinsics_vec256
-      t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)4U);
+      Lib_IntVector_Intrinsics_vec256 t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 4U);
       Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask260);
       Lib_IntVector_Intrinsics_vec256
-      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, (uint32_t)26U);
+      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, 26U);
       Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t10, mask260);
       Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)30U);
+      Lib_IntVector_Intrinsics_vec256 t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 30U);
       Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256 o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, 40U);
       Lib_IntVector_Intrinsics_vec256 o00 = o5;
       Lib_IntVector_Intrinsics_vec256 o11 = o10;
       Lib_IntVector_Intrinsics_vec256 o21 = o20;
@@ -94,12 +88,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
       e[2U] = o21;
       e[3U] = o31;
       e[4U] = o41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
       Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec256 *rn = pre0 + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec256 *rn5 = pre0 + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec256 *rn = pre0 + 10U;
+      Lib_IntVector_Intrinsics_vec256 *rn5 = pre0 + 15U;
       Lib_IntVector_Intrinsics_vec256 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec256 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec256 r2 = rn[2U];
@@ -204,37 +198,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
       Lib_IntVector_Intrinsics_vec256 t2 = a24;
       Lib_IntVector_Intrinsics_vec256 t3 = a34;
       Lib_IntVector_Intrinsics_vec256 t4 = a44;
-      Lib_IntVector_Intrinsics_vec256
-      mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec256
-      z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec256
-      z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
       Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec256
-      z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec256
-      z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec256 o01 = x02;
@@ -270,43 +255,39 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     }
     Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(acc0, pre0);
   }
-  uint32_t len1 = n * (uint32_t)16U - len0;
+  uint32_t len1 = n * 16U - len0;
   uint8_t *t10 = blocks + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem1 = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem1 = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t10 + i * (uint32_t)16U;
+    uint8_t *block = t10 + i * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -317,12 +298,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
     Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
     Lib_IntVector_Intrinsics_vec256 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec256 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r1[2U];
@@ -437,37 +418,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -481,41 +453,37 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     acc0[3U] = o3;
     acc0[4U] = o4;
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *last = t10 + nb * (uint32_t)16U;
+    uint8_t *last = t10 + nb * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem1 * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -526,12 +494,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem1 * 8U % 26U;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
-    Lib_IntVector_Intrinsics_vec256 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
-    e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec256 fi = e[rem1 * 8U / 26U];
+    e[rem1 * 8U / 26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
     Lib_IntVector_Intrinsics_vec256 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec256 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r1[2U];
@@ -646,37 +614,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -692,40 +651,36 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
   }
   uint8_t tmp[16U] = { 0U };
   memcpy(tmp, rem, r * sizeof (uint8_t));
-  if (r > (uint32_t)0U)
+  if (r > 0U)
   {
-    Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
     Lib_IntVector_Intrinsics_vec256 *acc = ctx;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -736,12 +691,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
     Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
     Lib_IntVector_Intrinsics_vec256 *r1 = pre;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec256 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r1[2U];
@@ -856,37 +811,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -916,48 +862,44 @@ poly1305_do_256(
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[25U] KRML_POST_ALIGN(32) = { 0U };
   uint8_t block[16U] = { 0U };
   Hacl_Poly1305_256_poly1305_init(ctx, k);
-  if (aadlen != (uint32_t)0U)
+  if (aadlen != 0U)
   {
     poly1305_padded_256(ctx, aadlen, aad);
   }
-  if (mlen != (uint32_t)0U)
+  if (mlen != 0U)
   {
     poly1305_padded_256(ctx, mlen, m);
   }
   store64_le(block, (uint64_t)aadlen);
-  store64_le(block + (uint32_t)8U, (uint64_t)mlen);
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  store64_le(block + 8U, (uint64_t)mlen);
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
   uint64_t u0 = load64_le(block);
   uint64_t lo = u0;
-  uint64_t u = load64_le(block + (uint32_t)8U);
+  uint64_t u = load64_le(block + 8U);
   uint64_t hi = u;
   Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
   Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
   Lib_IntVector_Intrinsics_vec256
   f010 =
     Lib_IntVector_Intrinsics_vec256_and(f0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f110 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f20 =
-    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
       Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-          Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec256
   f30 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
   Lib_IntVector_Intrinsics_vec256 f01 = f010;
   Lib_IntVector_Intrinsics_vec256 f111 = f110;
   Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -968,12 +910,12 @@ poly1305_do_256(
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
   Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
   Lib_IntVector_Intrinsics_vec256 *r = pre;
-  Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
   Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1088,37 +1030,28 @@ poly1305_do_256(
   Lib_IntVector_Intrinsics_vec256 t2 = a26;
   Lib_IntVector_Intrinsics_vec256 t3 = a36;
   Lib_IntVector_Intrinsics_vec256 t4 = a46;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1162,9 +1095,9 @@ Hacl_Chacha20Poly1305_256_aead_encrypt(
   uint8_t *mac
 )
 {
-  Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, cipher, m, k, n, (uint32_t)1U);
+  Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, cipher, m, k, n, 1U);
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
+  Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, tmp, tmp, k, n, 0U);
   uint8_t *key = tmp;
   poly1305_do_256(key, aadlen, aad, mlen, cipher, mac);
 }
@@ -1204,22 +1137,22 @@ Hacl_Chacha20Poly1305_256_aead_decrypt(
 {
   uint8_t computed_mac[16U] = { 0U };
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
+  Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, tmp, tmp, k, n, 0U);
   uint8_t *key = tmp;
   poly1305_do_256(key, aadlen, aad, mlen, cipher, computed_mac);
-  uint8_t res = (uint8_t)255U;
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
-    res = uu____0 & res;);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
-    Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, m, cipher, k, n, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, m, cipher, k, n, 1U);
+    return 0U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_Chacha20Poly1305_32.c b/src/Hacl_Chacha20Poly1305_32.c
index 179af485..211cf619 100644
--- a/src/Hacl_Chacha20Poly1305_32.c
+++ b/src/Hacl_Chacha20Poly1305_32.c
@@ -29,29 +29,29 @@
 
 static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
 {
-  uint32_t n = len / (uint32_t)16U;
-  uint32_t r = len % (uint32_t)16U;
+  uint32_t n = len / 16U;
+  uint32_t r = len % 16U;
   uint8_t *blocks = text;
-  uint8_t *rem = text + n * (uint32_t)16U;
-  uint64_t *pre0 = ctx + (uint32_t)5U;
+  uint8_t *rem = text + n * 16U;
+  uint64_t *pre0 = ctx + 5U;
   uint64_t *acc0 = ctx;
-  uint32_t nb = n * (uint32_t)16U / (uint32_t)16U;
-  uint32_t rem1 = n * (uint32_t)16U % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = n * 16U / 16U;
+  uint32_t rem1 = n * 16U % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = blocks + i * (uint32_t)16U;
+    uint8_t *block = blocks + i * 16U;
     uint64_t e[5U] = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -62,12 +62,12 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     uint64_t mask = b;
     uint64_t f4 = e[4U];
     e[4U] = f4 | mask;
     uint64_t *r1 = pre0;
-    uint64_t *r5 = pre0 + (uint32_t)5U;
+    uint64_t *r5 = pre0 + 5U;
     uint64_t r0 = r1[0U];
     uint64_t r11 = r1[1U];
     uint64_t r2 = r1[2U];
@@ -122,28 +122,28 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -157,23 +157,23 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     acc0[3U] = o3;
     acc0[4U] = o4;
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *last = blocks + nb * (uint32_t)16U;
+    uint8_t *last = blocks + nb * 16U;
     uint64_t e[5U] = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem1 * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -184,12 +184,12 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem1 * 8U % 26U;
     uint64_t mask = b;
-    uint64_t fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
-    e[rem1 * (uint32_t)8U / (uint32_t)26U] = fi | mask;
+    uint64_t fi = e[rem1 * 8U / 26U];
+    e[rem1 * 8U / 26U] = fi | mask;
     uint64_t *r1 = pre0;
-    uint64_t *r5 = pre0 + (uint32_t)5U;
+    uint64_t *r5 = pre0 + 5U;
     uint64_t r0 = r1[0U];
     uint64_t r11 = r1[1U];
     uint64_t r2 = r1[2U];
@@ -244,28 +244,28 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -281,22 +281,22 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
   }
   uint8_t tmp[16U] = { 0U };
   memcpy(tmp, rem, r * sizeof (uint8_t));
-  if (r > (uint32_t)0U)
+  if (r > 0U)
   {
-    uint64_t *pre = ctx + (uint32_t)5U;
+    uint64_t *pre = ctx + 5U;
     uint64_t *acc = ctx;
     uint64_t e[5U] = { 0U };
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -307,12 +307,12 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     uint64_t mask = b;
     uint64_t f4 = e[4U];
     e[4U] = f4 | mask;
     uint64_t *r1 = pre;
-    uint64_t *r5 = pre + (uint32_t)5U;
+    uint64_t *r5 = pre + 5U;
     uint64_t r0 = r1[0U];
     uint64_t r11 = r1[1U];
     uint64_t r2 = r1[2U];
@@ -367,28 +367,28 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -418,30 +418,30 @@ poly1305_do_32(
   uint64_t ctx[25U] = { 0U };
   uint8_t block[16U] = { 0U };
   Hacl_Poly1305_32_poly1305_init(ctx, k);
-  if (aadlen != (uint32_t)0U)
+  if (aadlen != 0U)
   {
     poly1305_padded_32(ctx, aadlen, aad);
   }
-  if (mlen != (uint32_t)0U)
+  if (mlen != 0U)
   {
     poly1305_padded_32(ctx, mlen, m);
   }
   store64_le(block, (uint64_t)aadlen);
-  store64_le(block + (uint32_t)8U, (uint64_t)mlen);
-  uint64_t *pre = ctx + (uint32_t)5U;
+  store64_le(block + 8U, (uint64_t)mlen);
+  uint64_t *pre = ctx + 5U;
   uint64_t *acc = ctx;
   uint64_t e[5U] = { 0U };
   uint64_t u0 = load64_le(block);
   uint64_t lo = u0;
-  uint64_t u = load64_le(block + (uint32_t)8U);
+  uint64_t u = load64_le(block + 8U);
   uint64_t hi = u;
   uint64_t f0 = lo;
   uint64_t f1 = hi;
-  uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-  uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-  uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-  uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-  uint64_t f40 = f1 >> (uint32_t)40U;
+  uint64_t f010 = f0 & 0x3ffffffULL;
+  uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+  uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+  uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+  uint64_t f40 = f1 >> 40U;
   uint64_t f01 = f010;
   uint64_t f111 = f110;
   uint64_t f2 = f20;
@@ -452,12 +452,12 @@ poly1305_do_32(
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   uint64_t mask = b;
   uint64_t f4 = e[4U];
   e[4U] = f4 | mask;
   uint64_t *r = pre;
-  uint64_t *r5 = pre + (uint32_t)5U;
+  uint64_t *r5 = pre + 5U;
   uint64_t r0 = r[0U];
   uint64_t r1 = r[1U];
   uint64_t r2 = r[2U];
@@ -512,28 +512,28 @@ poly1305_do_32(
   uint64_t t2 = a26;
   uint64_t t3 = a36;
   uint64_t t4 = a46;
-  uint64_t mask26 = (uint64_t)0x3ffffffU;
-  uint64_t z0 = t0 >> (uint32_t)26U;
-  uint64_t z1 = t3 >> (uint32_t)26U;
+  uint64_t mask26 = 0x3ffffffULL;
+  uint64_t z0 = t0 >> 26U;
+  uint64_t z1 = t3 >> 26U;
   uint64_t x0 = t0 & mask26;
   uint64_t x3 = t3 & mask26;
   uint64_t x1 = t1 + z0;
   uint64_t x4 = t4 + z1;
-  uint64_t z01 = x1 >> (uint32_t)26U;
-  uint64_t z11 = x4 >> (uint32_t)26U;
-  uint64_t t = z11 << (uint32_t)2U;
+  uint64_t z01 = x1 >> 26U;
+  uint64_t z11 = x4 >> 26U;
+  uint64_t t = z11 << 2U;
   uint64_t z12 = z11 + t;
   uint64_t x11 = x1 & mask26;
   uint64_t x41 = x4 & mask26;
   uint64_t x2 = t2 + z01;
   uint64_t x01 = x0 + z12;
-  uint64_t z02 = x2 >> (uint32_t)26U;
-  uint64_t z13 = x01 >> (uint32_t)26U;
+  uint64_t z02 = x2 >> 26U;
+  uint64_t z13 = x01 >> 26U;
   uint64_t x21 = x2 & mask26;
   uint64_t x02 = x01 & mask26;
   uint64_t x31 = x3 + z02;
   uint64_t x12 = x11 + z13;
-  uint64_t z03 = x31 >> (uint32_t)26U;
+  uint64_t z03 = x31 >> 26U;
   uint64_t x32 = x31 & mask26;
   uint64_t x42 = x41 + z03;
   uint64_t o0 = x02;
@@ -577,9 +577,9 @@ Hacl_Chacha20Poly1305_32_aead_encrypt(
   uint8_t *mac
 )
 {
-  Hacl_Chacha20_chacha20_encrypt(mlen, cipher, m, k, n, (uint32_t)1U);
+  Hacl_Chacha20_chacha20_encrypt(mlen, cipher, m, k, n, 1U);
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
+  Hacl_Chacha20_chacha20_encrypt(64U, tmp, tmp, k, n, 0U);
   uint8_t *key = tmp;
   poly1305_do_32(key, aadlen, aad, mlen, cipher, mac);
 }
@@ -619,22 +619,22 @@ Hacl_Chacha20Poly1305_32_aead_decrypt(
 {
   uint8_t computed_mac[16U] = { 0U };
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
+  Hacl_Chacha20_chacha20_encrypt(64U, tmp, tmp, k, n, 0U);
   uint8_t *key = tmp;
   poly1305_do_32(key, aadlen, aad, mlen, cipher, computed_mac);
-  uint8_t res = (uint8_t)255U;
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
-    res = uu____0 & res;);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
-    Hacl_Chacha20_chacha20_encrypt(mlen, m, cipher, k, n, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Chacha20_chacha20_encrypt(mlen, m, cipher, k, n, 1U);
+    return 0U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_Chacha20_Vec128.c b/src/Hacl_Chacha20_Vec128.c
index 1e0c4ec1..deab1dfc 100644
--- a/src/Hacl_Chacha20_Vec128.c
+++ b/src/Hacl_Chacha20_Vec128.c
@@ -32,100 +32,100 @@ static inline void double_round_128(Lib_IntVector_Intrinsics_vec128 *st)
 {
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std0 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std0, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std0, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std1 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std1, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std1, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std2 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std2, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std2, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std3 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std3, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std3, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std4 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std4, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std4, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std5 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std5, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std5, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std6 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std6, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std6, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std7 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std7, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std7, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std8 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std8, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std8, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std9 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std9, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std9, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std10 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std10, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std10, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std11 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std11, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std11, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std12 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std12, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std12, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std13 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std13, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std13, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std14 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std14, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std14, 7U);
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std15 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std15, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std15, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std16 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std16, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std16, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std17 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std17, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std17, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std18 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std18, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std18, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std19 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std19, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std19, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std20 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std20, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std20, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std21 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std21, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std21, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std22 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std22, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std22, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std23 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std23, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std23, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std24 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std24, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std24, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std25 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std25, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std25, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std26 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std26, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std26, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std27 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std27, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std27, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std28 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std28, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std28, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std29 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std29, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std29, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std30 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std30, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std30, 7U);
 }
 
 static inline void
@@ -135,8 +135,8 @@ chacha20_core_128(
   uint32_t ctr
 )
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  uint32_t ctr_u32 = (uint32_t)4U * ctr;
+  memcpy(k, ctx, 16U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  uint32_t ctr_u32 = 4U * ctr;
   Lib_IntVector_Intrinsics_vec128 cv = Lib_IntVector_Intrinsics_vec128_load32(ctr_u32);
   k[12U] = Lib_IntVector_Intrinsics_vec128_add32(k[12U], cv);
   double_round_128(k);
@@ -150,9 +150,9 @@ chacha20_core_128(
   double_round_128(k);
   double_round_128(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = k;
     Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_add32(k[i], ctx[i]);
     os[i] = x;);
@@ -164,47 +164,42 @@ chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n,
 {
   uint32_t ctx1[16U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx1;
     uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx1 + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx1[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx1 + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = ctx;
     uint32_t x = ctx1[i];
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_load32(x);
     os[i] = x0;);
-  Lib_IntVector_Intrinsics_vec128
-  ctr1 =
-    Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)0U,
-      (uint32_t)1U,
-      (uint32_t)2U,
-      (uint32_t)3U);
+  Lib_IntVector_Intrinsics_vec128 ctr1 = Lib_IntVector_Intrinsics_vec128_load32s(0U, 1U, 2U, 3U);
   Lib_IntVector_Intrinsics_vec128 c12 = ctx[12U];
   ctx[12U] = Lib_IntVector_Intrinsics_vec128_add32(c12, ctr1);
 }
@@ -221,13 +216,13 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[16U] KRML_POST_ALIGN(16) = { 0U };
   chacha20_init_128(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)256U;
-  uint32_t nb = len / (uint32_t)256U;
-  uint32_t rem1 = len % (uint32_t)256U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 256U;
+  uint32_t nb = len / 256U;
+  uint32_t rem1 = len % 256U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)256U;
-    uint8_t *uu____1 = text + i * (uint32_t)256U;
+    uint8_t *uu____0 = out + i * 256U;
+    uint8_t *uu____1 = text + i * 256U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, i);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -359,19 +354,19 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * 16U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)256U;
+    uint8_t *uu____2 = out + nb * 256U;
     uint8_t plain[256U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)256U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 256U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -503,13 +498,13 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * 16U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -526,13 +521,13 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[16U] KRML_POST_ALIGN(16) = { 0U };
   chacha20_init_128(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)256U;
-  uint32_t nb = len / (uint32_t)256U;
-  uint32_t rem1 = len % (uint32_t)256U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 256U;
+  uint32_t nb = len / 256U;
+  uint32_t rem1 = len % 256U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)256U;
-    uint8_t *uu____1 = cipher + i * (uint32_t)256U;
+    uint8_t *uu____0 = out + i * 256U;
+    uint8_t *uu____1 = cipher + i * 256U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, i);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -664,19 +659,19 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * 16U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)256U;
+    uint8_t *uu____2 = out + nb * 256U;
     uint8_t plain[256U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)256U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 256U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -808,13 +803,13 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * 16U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
diff --git a/src/Hacl_Chacha20_Vec256.c b/src/Hacl_Chacha20_Vec256.c
index 620f5040..e61a7cfe 100644
--- a/src/Hacl_Chacha20_Vec256.c
+++ b/src/Hacl_Chacha20_Vec256.c
@@ -32,100 +32,100 @@ static inline void double_round_256(Lib_IntVector_Intrinsics_vec256 *st)
 {
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std0 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std0, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std0, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std1 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std1, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std1, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std2 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std2, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std2, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std3 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std3, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std3, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std4 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std4, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std4, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std5 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std5, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std5, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std6 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std6, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std6, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std7 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std7, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std7, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std8 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std8, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std8, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std9 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std9, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std9, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std10 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std10, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std10, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std11 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std11, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std11, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std12 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std12, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std12, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std13 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std13, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std13, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std14 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std14, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std14, 7U);
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std15 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std15, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std15, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std16 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std16, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std16, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std17 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std17, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std17, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std18 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std18, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std18, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std19 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std19, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std19, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std20 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std20, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std20, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std21 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std21, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std21, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std22 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std22, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std22, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std23 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std23, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std23, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std24 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std24, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std24, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std25 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std25, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std25, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std26 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std26, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std26, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std27 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std27, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std27, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std28 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std28, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std28, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std29 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std29, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std29, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std30 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std30, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std30, 7U);
 }
 
 static inline void
@@ -135,8 +135,8 @@ chacha20_core_256(
   uint32_t ctr
 )
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  uint32_t ctr_u32 = (uint32_t)8U * ctr;
+  memcpy(k, ctx, 16U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  uint32_t ctr_u32 = 8U * ctr;
   Lib_IntVector_Intrinsics_vec256 cv = Lib_IntVector_Intrinsics_vec256_load32(ctr_u32);
   k[12U] = Lib_IntVector_Intrinsics_vec256_add32(k[12U], cv);
   double_round_256(k);
@@ -150,9 +150,9 @@ chacha20_core_256(
   double_round_256(k);
   double_round_256(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = k;
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_add32(k[i], ctx[i]);
     os[i] = x;);
@@ -164,51 +164,43 @@ chacha20_init_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k, uint8_t *n,
 {
   uint32_t ctx1[16U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx1;
     uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx1 + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx1[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx1 + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = ctx;
     uint32_t x = ctx1[i];
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_load32(x);
     os[i] = x0;);
   Lib_IntVector_Intrinsics_vec256
-  ctr1 =
-    Lib_IntVector_Intrinsics_vec256_load32s((uint32_t)0U,
-      (uint32_t)1U,
-      (uint32_t)2U,
-      (uint32_t)3U,
-      (uint32_t)4U,
-      (uint32_t)5U,
-      (uint32_t)6U,
-      (uint32_t)7U);
+  ctr1 = Lib_IntVector_Intrinsics_vec256_load32s(0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U);
   Lib_IntVector_Intrinsics_vec256 c12 = ctx[12U];
   ctx[12U] = Lib_IntVector_Intrinsics_vec256_add32(c12, ctr1);
 }
@@ -225,13 +217,13 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[16U] KRML_POST_ALIGN(32) = { 0U };
   chacha20_init_256(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)512U;
-  uint32_t nb = len / (uint32_t)512U;
-  uint32_t rem1 = len % (uint32_t)512U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 512U;
+  uint32_t nb = len / 512U;
+  uint32_t rem1 = len % 512U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)512U;
-    uint8_t *uu____1 = text + i * (uint32_t)512U;
+    uint8_t *uu____0 = out + i * 512U;
+    uint8_t *uu____1 = text + i * 512U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, i);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -459,19 +451,19 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * 32U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)512U;
+    uint8_t *uu____2 = out + nb * 512U;
     uint8_t plain[512U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)512U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 512U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -699,13 +691,13 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * 32U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -722,13 +714,13 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[16U] KRML_POST_ALIGN(32) = { 0U };
   chacha20_init_256(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)512U;
-  uint32_t nb = len / (uint32_t)512U;
-  uint32_t rem1 = len % (uint32_t)512U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 512U;
+  uint32_t nb = len / 512U;
+  uint32_t rem1 = len % 512U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)512U;
-    uint8_t *uu____1 = cipher + i * (uint32_t)512U;
+    uint8_t *uu____0 = out + i * 512U;
+    uint8_t *uu____1 = cipher + i * 512U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, i);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -956,19 +948,19 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * 32U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)512U;
+    uint8_t *uu____2 = out + nb * 512U;
     uint8_t plain[512U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)512U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 512U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -1196,13 +1188,13 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * 32U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
diff --git a/src/Hacl_Chacha20_Vec32.c b/src/Hacl_Chacha20_Vec32.c
index 2bf4764c..0dce915c 100644
--- a/src/Hacl_Chacha20_Vec32.c
+++ b/src/Hacl_Chacha20_Vec32.c
@@ -31,106 +31,106 @@ static inline void double_round_32(uint32_t *st)
 {
   st[0U] = st[0U] + st[4U];
   uint32_t std = st[12U] ^ st[0U];
-  st[12U] = std << (uint32_t)16U | std >> (uint32_t)16U;
+  st[12U] = std << 16U | std >> 16U;
   st[8U] = st[8U] + st[12U];
   uint32_t std0 = st[4U] ^ st[8U];
-  st[4U] = std0 << (uint32_t)12U | std0 >> (uint32_t)20U;
+  st[4U] = std0 << 12U | std0 >> 20U;
   st[0U] = st[0U] + st[4U];
   uint32_t std1 = st[12U] ^ st[0U];
-  st[12U] = std1 << (uint32_t)8U | std1 >> (uint32_t)24U;
+  st[12U] = std1 << 8U | std1 >> 24U;
   st[8U] = st[8U] + st[12U];
   uint32_t std2 = st[4U] ^ st[8U];
-  st[4U] = std2 << (uint32_t)7U | std2 >> (uint32_t)25U;
+  st[4U] = std2 << 7U | std2 >> 25U;
   st[1U] = st[1U] + st[5U];
   uint32_t std3 = st[13U] ^ st[1U];
-  st[13U] = std3 << (uint32_t)16U | std3 >> (uint32_t)16U;
+  st[13U] = std3 << 16U | std3 >> 16U;
   st[9U] = st[9U] + st[13U];
   uint32_t std4 = st[5U] ^ st[9U];
-  st[5U] = std4 << (uint32_t)12U | std4 >> (uint32_t)20U;
+  st[5U] = std4 << 12U | std4 >> 20U;
   st[1U] = st[1U] + st[5U];
   uint32_t std5 = st[13U] ^ st[1U];
-  st[13U] = std5 << (uint32_t)8U | std5 >> (uint32_t)24U;
+  st[13U] = std5 << 8U | std5 >> 24U;
   st[9U] = st[9U] + st[13U];
   uint32_t std6 = st[5U] ^ st[9U];
-  st[5U] = std6 << (uint32_t)7U | std6 >> (uint32_t)25U;
+  st[5U] = std6 << 7U | std6 >> 25U;
   st[2U] = st[2U] + st[6U];
   uint32_t std7 = st[14U] ^ st[2U];
-  st[14U] = std7 << (uint32_t)16U | std7 >> (uint32_t)16U;
+  st[14U] = std7 << 16U | std7 >> 16U;
   st[10U] = st[10U] + st[14U];
   uint32_t std8 = st[6U] ^ st[10U];
-  st[6U] = std8 << (uint32_t)12U | std8 >> (uint32_t)20U;
+  st[6U] = std8 << 12U | std8 >> 20U;
   st[2U] = st[2U] + st[6U];
   uint32_t std9 = st[14U] ^ st[2U];
-  st[14U] = std9 << (uint32_t)8U | std9 >> (uint32_t)24U;
+  st[14U] = std9 << 8U | std9 >> 24U;
   st[10U] = st[10U] + st[14U];
   uint32_t std10 = st[6U] ^ st[10U];
-  st[6U] = std10 << (uint32_t)7U | std10 >> (uint32_t)25U;
+  st[6U] = std10 << 7U | std10 >> 25U;
   st[3U] = st[3U] + st[7U];
   uint32_t std11 = st[15U] ^ st[3U];
-  st[15U] = std11 << (uint32_t)16U | std11 >> (uint32_t)16U;
+  st[15U] = std11 << 16U | std11 >> 16U;
   st[11U] = st[11U] + st[15U];
   uint32_t std12 = st[7U] ^ st[11U];
-  st[7U] = std12 << (uint32_t)12U | std12 >> (uint32_t)20U;
+  st[7U] = std12 << 12U | std12 >> 20U;
   st[3U] = st[3U] + st[7U];
   uint32_t std13 = st[15U] ^ st[3U];
-  st[15U] = std13 << (uint32_t)8U | std13 >> (uint32_t)24U;
+  st[15U] = std13 << 8U | std13 >> 24U;
   st[11U] = st[11U] + st[15U];
   uint32_t std14 = st[7U] ^ st[11U];
-  st[7U] = std14 << (uint32_t)7U | std14 >> (uint32_t)25U;
+  st[7U] = std14 << 7U | std14 >> 25U;
   st[0U] = st[0U] + st[5U];
   uint32_t std15 = st[15U] ^ st[0U];
-  st[15U] = std15 << (uint32_t)16U | std15 >> (uint32_t)16U;
+  st[15U] = std15 << 16U | std15 >> 16U;
   st[10U] = st[10U] + st[15U];
   uint32_t std16 = st[5U] ^ st[10U];
-  st[5U] = std16 << (uint32_t)12U | std16 >> (uint32_t)20U;
+  st[5U] = std16 << 12U | std16 >> 20U;
   st[0U] = st[0U] + st[5U];
   uint32_t std17 = st[15U] ^ st[0U];
-  st[15U] = std17 << (uint32_t)8U | std17 >> (uint32_t)24U;
+  st[15U] = std17 << 8U | std17 >> 24U;
   st[10U] = st[10U] + st[15U];
   uint32_t std18 = st[5U] ^ st[10U];
-  st[5U] = std18 << (uint32_t)7U | std18 >> (uint32_t)25U;
+  st[5U] = std18 << 7U | std18 >> 25U;
   st[1U] = st[1U] + st[6U];
   uint32_t std19 = st[12U] ^ st[1U];
-  st[12U] = std19 << (uint32_t)16U | std19 >> (uint32_t)16U;
+  st[12U] = std19 << 16U | std19 >> 16U;
   st[11U] = st[11U] + st[12U];
   uint32_t std20 = st[6U] ^ st[11U];
-  st[6U] = std20 << (uint32_t)12U | std20 >> (uint32_t)20U;
+  st[6U] = std20 << 12U | std20 >> 20U;
   st[1U] = st[1U] + st[6U];
   uint32_t std21 = st[12U] ^ st[1U];
-  st[12U] = std21 << (uint32_t)8U | std21 >> (uint32_t)24U;
+  st[12U] = std21 << 8U | std21 >> 24U;
   st[11U] = st[11U] + st[12U];
   uint32_t std22 = st[6U] ^ st[11U];
-  st[6U] = std22 << (uint32_t)7U | std22 >> (uint32_t)25U;
+  st[6U] = std22 << 7U | std22 >> 25U;
   st[2U] = st[2U] + st[7U];
   uint32_t std23 = st[13U] ^ st[2U];
-  st[13U] = std23 << (uint32_t)16U | std23 >> (uint32_t)16U;
+  st[13U] = std23 << 16U | std23 >> 16U;
   st[8U] = st[8U] + st[13U];
   uint32_t std24 = st[7U] ^ st[8U];
-  st[7U] = std24 << (uint32_t)12U | std24 >> (uint32_t)20U;
+  st[7U] = std24 << 12U | std24 >> 20U;
   st[2U] = st[2U] + st[7U];
   uint32_t std25 = st[13U] ^ st[2U];
-  st[13U] = std25 << (uint32_t)8U | std25 >> (uint32_t)24U;
+  st[13U] = std25 << 8U | std25 >> 24U;
   st[8U] = st[8U] + st[13U];
   uint32_t std26 = st[7U] ^ st[8U];
-  st[7U] = std26 << (uint32_t)7U | std26 >> (uint32_t)25U;
+  st[7U] = std26 << 7U | std26 >> 25U;
   st[3U] = st[3U] + st[4U];
   uint32_t std27 = st[14U] ^ st[3U];
-  st[14U] = std27 << (uint32_t)16U | std27 >> (uint32_t)16U;
+  st[14U] = std27 << 16U | std27 >> 16U;
   st[9U] = st[9U] + st[14U];
   uint32_t std28 = st[4U] ^ st[9U];
-  st[4U] = std28 << (uint32_t)12U | std28 >> (uint32_t)20U;
+  st[4U] = std28 << 12U | std28 >> 20U;
   st[3U] = st[3U] + st[4U];
   uint32_t std29 = st[14U] ^ st[3U];
-  st[14U] = std29 << (uint32_t)8U | std29 >> (uint32_t)24U;
+  st[14U] = std29 << 8U | std29 >> 24U;
   st[9U] = st[9U] + st[14U];
   uint32_t std30 = st[4U] ^ st[9U];
-  st[4U] = std30 << (uint32_t)7U | std30 >> (uint32_t)25U;
+  st[4U] = std30 << 7U | std30 >> 25U;
 }
 
 static inline void chacha20_core_32(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t));
-  uint32_t ctr_u32 = (uint32_t)1U * ctr;
+  memcpy(k, ctx, 16U * sizeof (uint32_t));
+  uint32_t ctr_u32 = 1U * ctr;
   uint32_t cv = ctr_u32;
   k[12U] = k[12U] + cv;
   double_round_32(k);
@@ -144,9 +144,9 @@ static inline void chacha20_core_32(uint32_t *k, uint32_t *ctx, uint32_t ctr)
   double_round_32(k);
   double_round_32(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = k;
     uint32_t x = k[i] + ctx[i];
     os[i] = x;);
@@ -157,41 +157,41 @@ static inline void chacha20_init_32(uint32_t *ctx, uint8_t *k, uint8_t *n, uint3
 {
   uint32_t ctx1[16U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx1;
     uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx1 + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx1[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx1 + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = ctx;
     uint32_t x = ctx1[i];
     os[i] = x;);
-  uint32_t ctr1 = (uint32_t)0U;
+  uint32_t ctr1 = 0U;
   uint32_t c12 = ctx[12U];
   ctx[12U] = c12 + ctr1;
 }
@@ -208,39 +208,39 @@ Hacl_Chacha20_Vec32_chacha20_encrypt_32(
 {
   uint32_t ctx[16U] = { 0U };
   chacha20_init_32(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = text + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = text + i0 * 64U;
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, i0);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(uu____1 + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(uu____1 + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(uu____0 + i * (uint32_t)4U, y););
+      store32_le(uu____0 + i * 4U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, nb);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(plain + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(plain + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(plain + i * (uint32_t)4U, y););
+      store32_le(plain + i * 4U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -257,39 +257,39 @@ Hacl_Chacha20_Vec32_chacha20_decrypt_32(
 {
   uint32_t ctx[16U] = { 0U };
   chacha20_init_32(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = cipher + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = cipher + i0 * 64U;
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, i0);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(uu____1 + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(uu____1 + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(uu____0 + i * (uint32_t)4U, y););
+      store32_le(uu____0 + i * 4U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, nb);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(plain + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(plain + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(plain + i * (uint32_t)4U, y););
+      store32_le(plain + i * 4U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
diff --git a/src/Hacl_Curve25519_51.c b/src/Hacl_Curve25519_51.c
index 64c855cf..ca561e89 100644
--- a/src/Hacl_Curve25519_51.c
+++ b/src/Hacl_Curve25519_51.c
@@ -28,38 +28,38 @@
 #include "internal/Hacl_Krmllib.h"
 #include "internal/Hacl_Bignum25519_51.h"
 
-static const uint8_t g25519[32U] = { (uint8_t)9U };
+static const uint8_t g25519[32U] = { 9U };
 
 static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_uint128 *tmp2)
 {
   uint64_t *nq = p01_tmp1;
-  uint64_t *nq_p1 = p01_tmp1 + (uint32_t)10U;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
+  uint64_t *nq_p1 = p01_tmp1 + 10U;
+  uint64_t *tmp1 = p01_tmp1 + 20U;
   uint64_t *x1 = q;
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)5U;
-  uint64_t *z3 = nq_p1 + (uint32_t)5U;
+  uint64_t *z2 = nq + 5U;
+  uint64_t *z3 = nq_p1 + 5U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)5U;
+  uint64_t *b = tmp1 + 5U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)10U;
+  uint64_t *dc = tmp1 + 10U;
   Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
   Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
   uint64_t *x3 = nq_p1;
-  uint64_t *z31 = nq_p1 + (uint32_t)5U;
+  uint64_t *z31 = nq_p1 + 5U;
   uint64_t *d0 = dc;
-  uint64_t *c0 = dc + (uint32_t)5U;
+  uint64_t *c0 = dc + 5U;
   Hacl_Impl_Curve25519_Field51_fadd(c0, x3, z31);
   Hacl_Impl_Curve25519_Field51_fsub(d0, x3, z31);
   Hacl_Impl_Curve25519_Field51_fmul2(dc, dc, ab, tmp2);
   Hacl_Impl_Curve25519_Field51_fadd(x3, d0, c0);
   Hacl_Impl_Curve25519_Field51_fsub(z31, d0, c0);
   uint64_t *a1 = tmp1;
-  uint64_t *b1 = tmp1 + (uint32_t)5U;
-  uint64_t *d = tmp1 + (uint32_t)10U;
-  uint64_t *c = tmp1 + (uint32_t)15U;
+  uint64_t *b1 = tmp1 + 5U;
+  uint64_t *d = tmp1 + 10U;
+  uint64_t *c = tmp1 + 15U;
   uint64_t *ab1 = tmp1;
-  uint64_t *dc1 = tmp1 + (uint32_t)10U;
+  uint64_t *dc1 = tmp1 + 10U;
   Hacl_Impl_Curve25519_Field51_fsqr2(dc1, ab1, tmp2);
   Hacl_Impl_Curve25519_Field51_fsqr2(nq_p1, nq_p1, tmp2);
   a1[0U] = c[0U];
@@ -68,7 +68,7 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_
   a1[3U] = c[3U];
   a1[4U] = c[4U];
   Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
-  Hacl_Impl_Curve25519_Field51_fmul1(b1, c, (uint64_t)121665U);
+  Hacl_Impl_Curve25519_Field51_fmul1(b1, c, 121665ULL);
   Hacl_Impl_Curve25519_Field51_fadd(b1, b1, d);
   Hacl_Impl_Curve25519_Field51_fmul2(nq, dc1, ab1, tmp2);
   Hacl_Impl_Curve25519_Field51_fmul(z3, z3, x1, tmp2);
@@ -77,13 +77,13 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_
 static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tmp2)
 {
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)5U;
+  uint64_t *z2 = nq + 5U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)5U;
-  uint64_t *d = tmp1 + (uint32_t)10U;
-  uint64_t *c = tmp1 + (uint32_t)15U;
+  uint64_t *b = tmp1 + 5U;
+  uint64_t *d = tmp1 + 10U;
+  uint64_t *c = tmp1 + 15U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)10U;
+  uint64_t *dc = tmp1 + 10U;
   Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
   Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
   Hacl_Impl_Curve25519_Field51_fsqr2(dc, ab, tmp2);
@@ -93,7 +93,7 @@ static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tm
   a[3U] = c[3U];
   a[4U] = c[4U];
   Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
-  Hacl_Impl_Curve25519_Field51_fmul1(b, c, (uint64_t)121665U);
+  Hacl_Impl_Curve25519_Field51_fmul1(b, c, 121665ULL);
   Hacl_Impl_Curve25519_Field51_fadd(b, b, d);
   Hacl_Impl_Curve25519_Field51_fmul2(nq, dc, ab, tmp2);
 }
@@ -101,46 +101,41 @@ static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tm
 static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
 {
   FStar_UInt128_uint128 tmp2[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp2[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp2[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   uint64_t p01_tmp1_swap[41U] = { 0U };
   uint64_t *p0 = p01_tmp1_swap;
   uint64_t *p01 = p01_tmp1_swap;
   uint64_t *p03 = p01;
-  uint64_t *p11 = p01 + (uint32_t)10U;
-  memcpy(p11, init, (uint32_t)10U * sizeof (uint64_t));
+  uint64_t *p11 = p01 + 10U;
+  memcpy(p11, init, 10U * sizeof (uint64_t));
   uint64_t *x0 = p03;
-  uint64_t *z0 = p03 + (uint32_t)5U;
-  x0[0U] = (uint64_t)1U;
-  x0[1U] = (uint64_t)0U;
-  x0[2U] = (uint64_t)0U;
-  x0[3U] = (uint64_t)0U;
-  x0[4U] = (uint64_t)0U;
-  z0[0U] = (uint64_t)0U;
-  z0[1U] = (uint64_t)0U;
-  z0[2U] = (uint64_t)0U;
-  z0[3U] = (uint64_t)0U;
-  z0[4U] = (uint64_t)0U;
+  uint64_t *z0 = p03 + 5U;
+  x0[0U] = 1ULL;
+  x0[1U] = 0ULL;
+  x0[2U] = 0ULL;
+  x0[3U] = 0ULL;
+  x0[4U] = 0ULL;
+  z0[0U] = 0ULL;
+  z0[1U] = 0ULL;
+  z0[2U] = 0ULL;
+  z0[3U] = 0ULL;
+  z0[4U] = 0ULL;
   uint64_t *p01_tmp1 = p01_tmp1_swap;
   uint64_t *p01_tmp11 = p01_tmp1_swap;
   uint64_t *nq1 = p01_tmp1_swap;
-  uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)10U;
-  uint64_t *swap = p01_tmp1_swap + (uint32_t)40U;
-  Hacl_Impl_Curve25519_Field51_cswap2((uint64_t)1U, nq1, nq_p11);
+  uint64_t *nq_p11 = p01_tmp1_swap + 10U;
+  uint64_t *swap = p01_tmp1_swap + 40U;
+  Hacl_Impl_Curve25519_Field51_cswap2(1ULL, nq1, nq_p11);
   point_add_and_double(init, p01_tmp11, tmp2);
-  swap[0U] = (uint64_t)1U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i++)
+  swap[0U] = 1ULL;
+  for (uint32_t i = 0U; i < 251U; i++)
   {
     uint64_t *p01_tmp12 = p01_tmp1_swap;
-    uint64_t *swap1 = p01_tmp1_swap + (uint32_t)40U;
+    uint64_t *swap1 = p01_tmp1_swap + 40U;
     uint64_t *nq2 = p01_tmp12;
-    uint64_t *nq_p12 = p01_tmp12 + (uint32_t)10U;
-    uint64_t
-    bit =
-      (uint64_t)(key[((uint32_t)253U - i)
-      / (uint32_t)8U]
-      >> ((uint32_t)253U - i) % (uint32_t)8U
-      & (uint8_t)1U);
+    uint64_t *nq_p12 = p01_tmp12 + 10U;
+    uint64_t bit = (uint64_t)((uint32_t)key[(253U - i) / 8U] >> (253U - i) % 8U & 1U);
     uint64_t sw = swap1[0U] ^ bit;
     Hacl_Impl_Curve25519_Field51_cswap2(sw, nq2, nq_p12);
     point_add_and_double(init, p01_tmp12, tmp2);
@@ -149,11 +144,11 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
   uint64_t sw = swap[0U];
   Hacl_Impl_Curve25519_Field51_cswap2(sw, nq1, nq_p11);
   uint64_t *nq10 = p01_tmp1;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
+  uint64_t *tmp1 = p01_tmp1 + 20U;
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
-  memcpy(out, p0, (uint32_t)10U * sizeof (uint64_t));
+  memcpy(out, p0, 10U * sizeof (uint64_t));
 }
 
 void
@@ -165,7 +160,7 @@ Hacl_Curve25519_51_fsquare_times(
 )
 {
   Hacl_Impl_Curve25519_Field51_fsqr(o, inp, tmp);
-  for (uint32_t i = (uint32_t)0U; i < n - (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < n - 1U; i++)
   {
     Hacl_Impl_Curve25519_Field51_fsqr(o, o, tmp);
   }
@@ -175,60 +170,56 @@ void Hacl_Curve25519_51_finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tm
 {
   uint64_t t1[20U] = { 0U };
   uint64_t *a1 = t1;
-  uint64_t *b1 = t1 + (uint32_t)5U;
-  uint64_t *t010 = t1 + (uint32_t)15U;
+  uint64_t *b1 = t1 + 5U;
+  uint64_t *t010 = t1 + 15U;
   FStar_UInt128_uint128 *tmp10 = tmp;
-  Hacl_Curve25519_51_fsquare_times(a1, i, tmp10, (uint32_t)1U);
-  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, (uint32_t)2U);
+  Hacl_Curve25519_51_fsquare_times(a1, i, tmp10, 1U);
+  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, 2U);
   Hacl_Impl_Curve25519_Field51_fmul(b1, t010, i, tmp);
   Hacl_Impl_Curve25519_Field51_fmul(a1, b1, a1, tmp);
-  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, (uint32_t)1U);
+  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, 1U);
   Hacl_Impl_Curve25519_Field51_fmul(b1, t010, b1, tmp);
-  Hacl_Curve25519_51_fsquare_times(t010, b1, tmp10, (uint32_t)5U);
+  Hacl_Curve25519_51_fsquare_times(t010, b1, tmp10, 5U);
   Hacl_Impl_Curve25519_Field51_fmul(b1, t010, b1, tmp);
-  uint64_t *b10 = t1 + (uint32_t)5U;
-  uint64_t *c10 = t1 + (uint32_t)10U;
-  uint64_t *t011 = t1 + (uint32_t)15U;
+  uint64_t *b10 = t1 + 5U;
+  uint64_t *c10 = t1 + 10U;
+  uint64_t *t011 = t1 + 15U;
   FStar_UInt128_uint128 *tmp11 = tmp;
-  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, (uint32_t)10U);
+  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, 10U);
   Hacl_Impl_Curve25519_Field51_fmul(c10, t011, b10, tmp);
-  Hacl_Curve25519_51_fsquare_times(t011, c10, tmp11, (uint32_t)20U);
+  Hacl_Curve25519_51_fsquare_times(t011, c10, tmp11, 20U);
   Hacl_Impl_Curve25519_Field51_fmul(t011, t011, c10, tmp);
-  Hacl_Curve25519_51_fsquare_times(t011, t011, tmp11, (uint32_t)10U);
+  Hacl_Curve25519_51_fsquare_times(t011, t011, tmp11, 10U);
   Hacl_Impl_Curve25519_Field51_fmul(b10, t011, b10, tmp);
-  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, (uint32_t)50U);
+  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, 50U);
   Hacl_Impl_Curve25519_Field51_fmul(c10, t011, b10, tmp);
-  uint64_t *b11 = t1 + (uint32_t)5U;
-  uint64_t *c1 = t1 + (uint32_t)10U;
-  uint64_t *t01 = t1 + (uint32_t)15U;
+  uint64_t *b11 = t1 + 5U;
+  uint64_t *c1 = t1 + 10U;
+  uint64_t *t01 = t1 + 15U;
   FStar_UInt128_uint128 *tmp1 = tmp;
-  Hacl_Curve25519_51_fsquare_times(t01, c1, tmp1, (uint32_t)100U);
+  Hacl_Curve25519_51_fsquare_times(t01, c1, tmp1, 100U);
   Hacl_Impl_Curve25519_Field51_fmul(t01, t01, c1, tmp);
-  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, (uint32_t)50U);
+  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, 50U);
   Hacl_Impl_Curve25519_Field51_fmul(t01, t01, b11, tmp);
-  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, (uint32_t)5U);
+  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, 5U);
   uint64_t *a = t1;
-  uint64_t *t0 = t1 + (uint32_t)15U;
+  uint64_t *t0 = t1 + 15U;
   Hacl_Impl_Curve25519_Field51_fmul(o, t0, a, tmp);
 }
 
 static void encode_point(uint8_t *o, uint64_t *i)
 {
   uint64_t *x = i;
-  uint64_t *z = i + (uint32_t)5U;
+  uint64_t *z = i + 5U;
   uint64_t tmp[5U] = { 0U };
   uint64_t u64s[4U] = { 0U };
   FStar_UInt128_uint128 tmp_w[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp_w[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp_w[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_finv(tmp, z, tmp_w);
   Hacl_Impl_Curve25519_Field51_fmul(tmp, tmp, x, tmp_w);
   Hacl_Impl_Curve25519_Field51_store_felem(u64s, tmp);
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(o + i0 * (uint32_t)8U, u64s[i0]););
+  KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, store64_le(o + i0 * 8U, u64s[i0]););
 }
 
 /**
@@ -243,32 +234,32 @@ void Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
   uint64_t init[10U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = tmp;
-    uint8_t *bj = pub + i * (uint32_t)8U;
+    uint8_t *bj = pub + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t tmp3 = tmp[3U];
-  tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU;
+  tmp[3U] = tmp3 & 0x7fffffffffffffffULL;
   uint64_t *x = init;
-  uint64_t *z = init + (uint32_t)5U;
-  z[0U] = (uint64_t)1U;
-  z[1U] = (uint64_t)0U;
-  z[2U] = (uint64_t)0U;
-  z[3U] = (uint64_t)0U;
-  z[4U] = (uint64_t)0U;
-  uint64_t f0l = tmp[0U] & (uint64_t)0x7ffffffffffffU;
-  uint64_t f0h = tmp[0U] >> (uint32_t)51U;
-  uint64_t f1l = (tmp[1U] & (uint64_t)0x3fffffffffU) << (uint32_t)13U;
-  uint64_t f1h = tmp[1U] >> (uint32_t)38U;
-  uint64_t f2l = (tmp[2U] & (uint64_t)0x1ffffffU) << (uint32_t)26U;
-  uint64_t f2h = tmp[2U] >> (uint32_t)25U;
-  uint64_t f3l = (tmp[3U] & (uint64_t)0xfffU) << (uint32_t)39U;
-  uint64_t f3h = tmp[3U] >> (uint32_t)12U;
+  uint64_t *z = init + 5U;
+  z[0U] = 1ULL;
+  z[1U] = 0ULL;
+  z[2U] = 0ULL;
+  z[3U] = 0ULL;
+  z[4U] = 0ULL;
+  uint64_t f0l = tmp[0U] & 0x7ffffffffffffULL;
+  uint64_t f0h = tmp[0U] >> 51U;
+  uint64_t f1l = (tmp[1U] & 0x3fffffffffULL) << 13U;
+  uint64_t f1h = tmp[1U] >> 38U;
+  uint64_t f2l = (tmp[2U] & 0x1ffffffULL) << 26U;
+  uint64_t f2h = tmp[2U] >> 25U;
+  uint64_t f3l = (tmp[3U] & 0xfffULL) << 39U;
+  uint64_t f3h = tmp[3U] >> 12U;
   x[0U] = f0l;
   x[1U] = f0h | f1l;
   x[2U] = f1h | f2l;
@@ -289,7 +280,7 @@ This computes a scalar multiplication of the secret/private key with the curve's
 void Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv)
 {
   uint8_t basepoint[32U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = basepoint;
     uint8_t x = g25519[i];
@@ -309,14 +300,14 @@ bool Hacl_Curve25519_51_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
 {
   uint8_t zeros[32U] = { 0U };
   Hacl_Curve25519_51_scalarmult(out, priv, pub);
-  uint8_t res = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint8_t res = 255U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint8_t z = res;
-  bool r = z == (uint8_t)255U;
+  bool r = z == 255U;
   return !r;
 }
 
diff --git a/src/Hacl_Curve25519_64.c b/src/Hacl_Curve25519_64.c
index fb0974fe..edcab306 100644
--- a/src/Hacl_Curve25519_64.c
+++ b/src/Hacl_Curve25519_64.c
@@ -35,7 +35,7 @@ static inline void add_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   add_scalar(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(add_scalar_e(out, f1, f2));
+  add_scalar_e(out, f1, f2);
   #endif
 }
 
@@ -44,7 +44,7 @@ static inline void fadd0(uint64_t *out, uint64_t *f1, uint64_t *f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fadd(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(fadd_e(out, f1, f2));
+  fadd_e(out, f1, f2);
   #endif
 }
 
@@ -53,7 +53,7 @@ static inline void fsub0(uint64_t *out, uint64_t *f1, uint64_t *f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fsub(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(fsub_e(out, f1, f2));
+  fsub_e(out, f1, f2);
   #endif
 }
 
@@ -62,7 +62,7 @@ static inline void fmul0(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *tm
   #if HACL_CAN_COMPILE_INLINE_ASM
   fmul(out, f1, f2, tmp);
   #else
-  KRML_HOST_IGNORE(fmul_e(tmp, f1, out, f2));
+  fmul_e(tmp, f1, out, f2);
   #endif
 }
 
@@ -71,7 +71,7 @@ static inline void fmul20(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *t
   #if HACL_CAN_COMPILE_INLINE_ASM
   fmul2(out, f1, f2, tmp);
   #else
-  KRML_HOST_IGNORE(fmul2_e(tmp, f1, out, f2));
+  fmul2_e(tmp, f1, out, f2);
   #endif
 }
 
@@ -80,7 +80,7 @@ static inline void fmul_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fmul_scalar(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(fmul_scalar_e(out, f1, f2));
+  fmul_scalar_e(out, f1, f2);
   #endif
 }
 
@@ -89,7 +89,7 @@ static inline void fsqr0(uint64_t *out, uint64_t *f1, uint64_t *tmp)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fsqr(out, f1, tmp);
   #else
-  KRML_HOST_IGNORE(fsqr_e(tmp, f1, out));
+  fsqr_e(tmp, f1, out);
   #endif
 }
 
@@ -98,7 +98,7 @@ static inline void fsqr20(uint64_t *out, uint64_t *f, uint64_t *tmp)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fsqr2(out, f, tmp);
   #else
-  KRML_HOST_IGNORE(fsqr2_e(tmp, f, out));
+  fsqr2_e(tmp, f, out);
   #endif
 }
 
@@ -107,42 +107,42 @@ static inline void cswap20(uint64_t bit, uint64_t *p1, uint64_t *p2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   cswap2(bit, p1, p2);
   #else
-  KRML_HOST_IGNORE(cswap2_e(bit, p1, p2));
+  cswap2_e(bit, p1, p2);
   #endif
 }
 
-static const uint8_t g25519[32U] = { (uint8_t)9U };
+static const uint8_t g25519[32U] = { 9U };
 
 static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2)
 {
   uint64_t *nq = p01_tmp1;
-  uint64_t *nq_p1 = p01_tmp1 + (uint32_t)8U;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U;
+  uint64_t *nq_p1 = p01_tmp1 + 8U;
+  uint64_t *tmp1 = p01_tmp1 + 16U;
   uint64_t *x1 = q;
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)4U;
-  uint64_t *z3 = nq_p1 + (uint32_t)4U;
+  uint64_t *z2 = nq + 4U;
+  uint64_t *z3 = nq_p1 + 4U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)4U;
+  uint64_t *b = tmp1 + 4U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)8U;
+  uint64_t *dc = tmp1 + 8U;
   fadd0(a, x2, z2);
   fsub0(b, x2, z2);
   uint64_t *x3 = nq_p1;
-  uint64_t *z31 = nq_p1 + (uint32_t)4U;
+  uint64_t *z31 = nq_p1 + 4U;
   uint64_t *d0 = dc;
-  uint64_t *c0 = dc + (uint32_t)4U;
+  uint64_t *c0 = dc + 4U;
   fadd0(c0, x3, z31);
   fsub0(d0, x3, z31);
   fmul20(dc, dc, ab, tmp2);
   fadd0(x3, d0, c0);
   fsub0(z31, d0, c0);
   uint64_t *a1 = tmp1;
-  uint64_t *b1 = tmp1 + (uint32_t)4U;
-  uint64_t *d = tmp1 + (uint32_t)8U;
-  uint64_t *c = tmp1 + (uint32_t)12U;
+  uint64_t *b1 = tmp1 + 4U;
+  uint64_t *d = tmp1 + 8U;
+  uint64_t *c = tmp1 + 12U;
   uint64_t *ab1 = tmp1;
-  uint64_t *dc1 = tmp1 + (uint32_t)8U;
+  uint64_t *dc1 = tmp1 + 8U;
   fsqr20(dc1, ab1, tmp2);
   fsqr20(nq_p1, nq_p1, tmp2);
   a1[0U] = c[0U];
@@ -150,7 +150,7 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2
   a1[2U] = c[2U];
   a1[3U] = c[3U];
   fsub0(c, d, c);
-  fmul_scalar0(b1, c, (uint64_t)121665U);
+  fmul_scalar0(b1, c, 121665ULL);
   fadd0(b1, b1, d);
   fmul20(nq, dc1, ab1, tmp2);
   fmul0(z3, z3, x1, tmp2);
@@ -159,13 +159,13 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2
 static void point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2)
 {
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)4U;
+  uint64_t *z2 = nq + 4U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)4U;
-  uint64_t *d = tmp1 + (uint32_t)8U;
-  uint64_t *c = tmp1 + (uint32_t)12U;
+  uint64_t *b = tmp1 + 4U;
+  uint64_t *d = tmp1 + 8U;
+  uint64_t *c = tmp1 + 12U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)8U;
+  uint64_t *dc = tmp1 + 8U;
   fadd0(a, x2, z2);
   fsub0(b, x2, z2);
   fsqr20(dc, ab, tmp2);
@@ -174,7 +174,7 @@ static void point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2)
   a[2U] = c[2U];
   a[3U] = c[3U];
   fsub0(c, d, c);
-  fmul_scalar0(b, c, (uint64_t)121665U);
+  fmul_scalar0(b, c, 121665ULL);
   fadd0(b, b, d);
   fmul20(nq, dc, ab, tmp2);
 }
@@ -186,38 +186,33 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
   uint64_t *p0 = p01_tmp1_swap;
   uint64_t *p01 = p01_tmp1_swap;
   uint64_t *p03 = p01;
-  uint64_t *p11 = p01 + (uint32_t)8U;
-  memcpy(p11, init, (uint32_t)8U * sizeof (uint64_t));
+  uint64_t *p11 = p01 + 8U;
+  memcpy(p11, init, 8U * sizeof (uint64_t));
   uint64_t *x0 = p03;
-  uint64_t *z0 = p03 + (uint32_t)4U;
-  x0[0U] = (uint64_t)1U;
-  x0[1U] = (uint64_t)0U;
-  x0[2U] = (uint64_t)0U;
-  x0[3U] = (uint64_t)0U;
-  z0[0U] = (uint64_t)0U;
-  z0[1U] = (uint64_t)0U;
-  z0[2U] = (uint64_t)0U;
-  z0[3U] = (uint64_t)0U;
+  uint64_t *z0 = p03 + 4U;
+  x0[0U] = 1ULL;
+  x0[1U] = 0ULL;
+  x0[2U] = 0ULL;
+  x0[3U] = 0ULL;
+  z0[0U] = 0ULL;
+  z0[1U] = 0ULL;
+  z0[2U] = 0ULL;
+  z0[3U] = 0ULL;
   uint64_t *p01_tmp1 = p01_tmp1_swap;
   uint64_t *p01_tmp11 = p01_tmp1_swap;
   uint64_t *nq1 = p01_tmp1_swap;
-  uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)8U;
-  uint64_t *swap = p01_tmp1_swap + (uint32_t)32U;
-  cswap20((uint64_t)1U, nq1, nq_p11);
+  uint64_t *nq_p11 = p01_tmp1_swap + 8U;
+  uint64_t *swap = p01_tmp1_swap + 32U;
+  cswap20(1ULL, nq1, nq_p11);
   point_add_and_double(init, p01_tmp11, tmp2);
-  swap[0U] = (uint64_t)1U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i++)
+  swap[0U] = 1ULL;
+  for (uint32_t i = 0U; i < 251U; i++)
   {
     uint64_t *p01_tmp12 = p01_tmp1_swap;
-    uint64_t *swap1 = p01_tmp1_swap + (uint32_t)32U;
+    uint64_t *swap1 = p01_tmp1_swap + 32U;
     uint64_t *nq2 = p01_tmp12;
-    uint64_t *nq_p12 = p01_tmp12 + (uint32_t)8U;
-    uint64_t
-    bit =
-      (uint64_t)(key[((uint32_t)253U - i)
-      / (uint32_t)8U]
-      >> ((uint32_t)253U - i) % (uint32_t)8U
-      & (uint8_t)1U);
+    uint64_t *nq_p12 = p01_tmp12 + 8U;
+    uint64_t bit = (uint64_t)((uint32_t)key[(253U - i) / 8U] >> (253U - i) % 8U & 1U);
     uint64_t sw = swap1[0U] ^ bit;
     cswap20(sw, nq2, nq_p12);
     point_add_and_double(init, p01_tmp12, tmp2);
@@ -226,17 +221,17 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
   uint64_t sw = swap[0U];
   cswap20(sw, nq1, nq_p11);
   uint64_t *nq10 = p01_tmp1;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U;
+  uint64_t *tmp1 = p01_tmp1 + 16U;
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
-  memcpy(out, p0, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(out, p0, 8U * sizeof (uint64_t));
 }
 
 static void fsquare_times(uint64_t *o, uint64_t *inp, uint64_t *tmp, uint32_t n)
 {
   fsqr0(o, inp, tmp);
-  for (uint32_t i = (uint32_t)0U; i < n - (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < n - 1U; i++)
   {
     fsqr0(o, o, tmp);
   }
@@ -246,66 +241,66 @@ static void finv(uint64_t *o, uint64_t *i, uint64_t *tmp)
 {
   uint64_t t1[16U] = { 0U };
   uint64_t *a1 = t1;
-  uint64_t *b1 = t1 + (uint32_t)4U;
-  uint64_t *t010 = t1 + (uint32_t)12U;
+  uint64_t *b1 = t1 + 4U;
+  uint64_t *t010 = t1 + 12U;
   uint64_t *tmp10 = tmp;
-  fsquare_times(a1, i, tmp10, (uint32_t)1U);
-  fsquare_times(t010, a1, tmp10, (uint32_t)2U);
+  fsquare_times(a1, i, tmp10, 1U);
+  fsquare_times(t010, a1, tmp10, 2U);
   fmul0(b1, t010, i, tmp);
   fmul0(a1, b1, a1, tmp);
-  fsquare_times(t010, a1, tmp10, (uint32_t)1U);
+  fsquare_times(t010, a1, tmp10, 1U);
   fmul0(b1, t010, b1, tmp);
-  fsquare_times(t010, b1, tmp10, (uint32_t)5U);
+  fsquare_times(t010, b1, tmp10, 5U);
   fmul0(b1, t010, b1, tmp);
-  uint64_t *b10 = t1 + (uint32_t)4U;
-  uint64_t *c10 = t1 + (uint32_t)8U;
-  uint64_t *t011 = t1 + (uint32_t)12U;
+  uint64_t *b10 = t1 + 4U;
+  uint64_t *c10 = t1 + 8U;
+  uint64_t *t011 = t1 + 12U;
   uint64_t *tmp11 = tmp;
-  fsquare_times(t011, b10, tmp11, (uint32_t)10U);
+  fsquare_times(t011, b10, tmp11, 10U);
   fmul0(c10, t011, b10, tmp);
-  fsquare_times(t011, c10, tmp11, (uint32_t)20U);
+  fsquare_times(t011, c10, tmp11, 20U);
   fmul0(t011, t011, c10, tmp);
-  fsquare_times(t011, t011, tmp11, (uint32_t)10U);
+  fsquare_times(t011, t011, tmp11, 10U);
   fmul0(b10, t011, b10, tmp);
-  fsquare_times(t011, b10, tmp11, (uint32_t)50U);
+  fsquare_times(t011, b10, tmp11, 50U);
   fmul0(c10, t011, b10, tmp);
-  uint64_t *b11 = t1 + (uint32_t)4U;
-  uint64_t *c1 = t1 + (uint32_t)8U;
-  uint64_t *t01 = t1 + (uint32_t)12U;
+  uint64_t *b11 = t1 + 4U;
+  uint64_t *c1 = t1 + 8U;
+  uint64_t *t01 = t1 + 12U;
   uint64_t *tmp1 = tmp;
-  fsquare_times(t01, c1, tmp1, (uint32_t)100U);
+  fsquare_times(t01, c1, tmp1, 100U);
   fmul0(t01, t01, c1, tmp);
-  fsquare_times(t01, t01, tmp1, (uint32_t)50U);
+  fsquare_times(t01, t01, tmp1, 50U);
   fmul0(t01, t01, b11, tmp);
-  fsquare_times(t01, t01, tmp1, (uint32_t)5U);
+  fsquare_times(t01, t01, tmp1, 5U);
   uint64_t *a = t1;
-  uint64_t *t0 = t1 + (uint32_t)12U;
+  uint64_t *t0 = t1 + 12U;
   fmul0(o, t0, a, tmp);
 }
 
 static void store_felem(uint64_t *b, uint64_t *f)
 {
   uint64_t f30 = f[3U];
-  uint64_t top_bit0 = f30 >> (uint32_t)63U;
-  f[3U] = f30 & (uint64_t)0x7fffffffffffffffU;
-  add_scalar0(f, f, (uint64_t)19U * top_bit0);
+  uint64_t top_bit0 = f30 >> 63U;
+  f[3U] = f30 & 0x7fffffffffffffffULL;
+  add_scalar0(f, f, 19ULL * top_bit0);
   uint64_t f31 = f[3U];
-  uint64_t top_bit = f31 >> (uint32_t)63U;
-  f[3U] = f31 & (uint64_t)0x7fffffffffffffffU;
-  add_scalar0(f, f, (uint64_t)19U * top_bit);
+  uint64_t top_bit = f31 >> 63U;
+  f[3U] = f31 & 0x7fffffffffffffffULL;
+  add_scalar0(f, f, 19ULL * top_bit);
   uint64_t f0 = f[0U];
   uint64_t f1 = f[1U];
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
-  uint64_t m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0xffffffffffffffedU);
-  uint64_t m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0xffffffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0xffffffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7fffffffffffffffU);
+  uint64_t m0 = FStar_UInt64_gte_mask(f0, 0xffffffffffffffedULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(f1, 0xffffffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(f2, 0xffffffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(f3, 0x7fffffffffffffffULL);
   uint64_t mask = ((m0 & m1) & m2) & m3;
-  uint64_t f0_ = f0 - (mask & (uint64_t)0xffffffffffffffedU);
-  uint64_t f1_ = f1 - (mask & (uint64_t)0xffffffffffffffffU);
-  uint64_t f2_ = f2 - (mask & (uint64_t)0xffffffffffffffffU);
-  uint64_t f3_ = f3 - (mask & (uint64_t)0x7fffffffffffffffU);
+  uint64_t f0_ = f0 - (mask & 0xffffffffffffffedULL);
+  uint64_t f1_ = f1 - (mask & 0xffffffffffffffffULL);
+  uint64_t f2_ = f2 - (mask & 0xffffffffffffffffULL);
+  uint64_t f3_ = f3 - (mask & 0x7fffffffffffffffULL);
   uint64_t o0 = f0_;
   uint64_t o1 = f1_;
   uint64_t o2 = f2_;
@@ -319,18 +314,14 @@ static void store_felem(uint64_t *b, uint64_t *f)
 static void encode_point(uint8_t *o, uint64_t *i)
 {
   uint64_t *x = i;
-  uint64_t *z = i + (uint32_t)4U;
+  uint64_t *z = i + 4U;
   uint64_t tmp[4U] = { 0U };
   uint64_t u64s[4U] = { 0U };
   uint64_t tmp_w[16U] = { 0U };
   finv(tmp, z, tmp_w);
   fmul0(tmp, tmp, x, tmp_w);
   store_felem(u64s, tmp);
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(o + i0 * (uint32_t)8U, u64s[i0]););
+  KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, store64_le(o + i0 * 8U, u64s[i0]););
 }
 
 /**
@@ -345,23 +336,23 @@ void Hacl_Curve25519_64_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
   uint64_t init[8U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = tmp;
-    uint8_t *bj = pub + i * (uint32_t)8U;
+    uint8_t *bj = pub + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t tmp3 = tmp[3U];
-  tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU;
+  tmp[3U] = tmp3 & 0x7fffffffffffffffULL;
   uint64_t *x = init;
-  uint64_t *z = init + (uint32_t)4U;
-  z[0U] = (uint64_t)1U;
-  z[1U] = (uint64_t)0U;
-  z[2U] = (uint64_t)0U;
-  z[3U] = (uint64_t)0U;
+  uint64_t *z = init + 4U;
+  z[0U] = 1ULL;
+  z[1U] = 0ULL;
+  z[2U] = 0ULL;
+  z[3U] = 0ULL;
   x[0U] = tmp[0U];
   x[1U] = tmp[1U];
   x[2U] = tmp[2U];
@@ -381,7 +372,7 @@ This computes a scalar multiplication of the secret/private key with the curve's
 void Hacl_Curve25519_64_secret_to_public(uint8_t *pub, uint8_t *priv)
 {
   uint8_t basepoint[32U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = basepoint;
     uint8_t x = g25519[i];
@@ -401,14 +392,14 @@ bool Hacl_Curve25519_64_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
 {
   uint8_t zeros[32U] = { 0U };
   Hacl_Curve25519_64_scalarmult(out, priv, pub);
-  uint8_t res = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint8_t res = 255U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint8_t z = res;
-  bool r = z == (uint8_t)255U;
+  bool r = z == 255U;
   return !r;
 }
 
diff --git a/src/Hacl_EC_Ed25519.c b/src/Hacl_EC_Ed25519.c
index 46f2837b..6ab24a33 100644
--- a/src/Hacl_EC_Ed25519.c
+++ b/src/Hacl_EC_Ed25519.c
@@ -43,11 +43,11 @@ Write the additive identity in `f`.
 */
 void Hacl_EC_Ed25519_mk_felem_zero(uint64_t *b)
 {
-  b[0U] = (uint64_t)0U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 0ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
 }
 
 /**
@@ -57,11 +57,11 @@ Write the multiplicative identity in `f`.
 */
 void Hacl_EC_Ed25519_mk_felem_one(uint64_t *b)
 {
-  b[0U] = (uint64_t)1U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 1ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
 }
 
 /**
@@ -106,8 +106,8 @@ Write `a * b mod p` in `out`.
 void Hacl_EC_Ed25519_felem_mul(uint64_t *a, uint64_t *b, uint64_t *out)
 {
   FStar_UInt128_uint128 tmp[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fmul(out, a, b, tmp);
 }
 
@@ -123,8 +123,8 @@ Write `a * a mod p` in `out`.
 void Hacl_EC_Ed25519_felem_sqr(uint64_t *a, uint64_t *out)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fsqr(out, a, tmp);
 }
 
@@ -205,29 +205,29 @@ Write the base point (generator) in `p`.
 void Hacl_EC_Ed25519_mk_base_point(uint64_t *p)
 {
   uint64_t *gx = p;
-  uint64_t *gy = p + (uint32_t)5U;
-  uint64_t *gz = p + (uint32_t)10U;
-  uint64_t *gt = p + (uint32_t)15U;
-  gx[0U] = (uint64_t)0x00062d608f25d51aU;
-  gx[1U] = (uint64_t)0x000412a4b4f6592aU;
-  gx[2U] = (uint64_t)0x00075b7171a4b31dU;
-  gx[3U] = (uint64_t)0x0001ff60527118feU;
-  gx[4U] = (uint64_t)0x000216936d3cd6e5U;
-  gy[0U] = (uint64_t)0x0006666666666658U;
-  gy[1U] = (uint64_t)0x0004ccccccccccccU;
-  gy[2U] = (uint64_t)0x0001999999999999U;
-  gy[3U] = (uint64_t)0x0003333333333333U;
-  gy[4U] = (uint64_t)0x0006666666666666U;
-  gz[0U] = (uint64_t)1U;
-  gz[1U] = (uint64_t)0U;
-  gz[2U] = (uint64_t)0U;
-  gz[3U] = (uint64_t)0U;
-  gz[4U] = (uint64_t)0U;
-  gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
-  gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
-  gt[2U] = (uint64_t)0x0002af8df483c27eU;
-  gt[3U] = (uint64_t)0x000332b375274732U;
-  gt[4U] = (uint64_t)0x00067875f0fd78b7U;
+  uint64_t *gy = p + 5U;
+  uint64_t *gz = p + 10U;
+  uint64_t *gt = p + 15U;
+  gx[0U] = 0x00062d608f25d51aULL;
+  gx[1U] = 0x000412a4b4f6592aULL;
+  gx[2U] = 0x00075b7171a4b31dULL;
+  gx[3U] = 0x0001ff60527118feULL;
+  gx[4U] = 0x000216936d3cd6e5ULL;
+  gy[0U] = 0x0006666666666658ULL;
+  gy[1U] = 0x0004ccccccccccccULL;
+  gy[2U] = 0x0001999999999999ULL;
+  gy[3U] = 0x0003333333333333ULL;
+  gy[4U] = 0x0006666666666666ULL;
+  gz[0U] = 1ULL;
+  gz[1U] = 0ULL;
+  gz[2U] = 0ULL;
+  gz[3U] = 0ULL;
+  gz[4U] = 0ULL;
+  gt[0U] = 0x00068ab3a5b7dda3ULL;
+  gt[1U] = 0x00000eea2a5eadbbULL;
+  gt[2U] = 0x0002af8df483c27eULL;
+  gt[3U] = 0x000332b375274732ULL;
+  gt[4U] = 0x00067875f0fd78b7ULL;
 }
 
 /**
diff --git a/src/Hacl_EC_K256.c b/src/Hacl_EC_K256.c
index e48edb5b..581c223b 100644
--- a/src/Hacl_EC_K256.c
+++ b/src/Hacl_EC_K256.c
@@ -43,7 +43,7 @@ Write the additive identity in `f`.
 */
 void Hacl_EC_K256_mk_felem_zero(uint64_t *f)
 {
-  memset(f, 0U, (uint32_t)5U * sizeof (uint64_t));
+  memset(f, 0U, 5U * sizeof (uint64_t));
 }
 
 /**
@@ -53,8 +53,8 @@ Write the multiplicative identity in `f`.
 */
 void Hacl_EC_K256_mk_felem_one(uint64_t *f)
 {
-  memset(f, 0U, (uint32_t)5U * sizeof (uint64_t));
-  f[0U] = (uint64_t)1U;
+  memset(f, 0U, 5U * sizeof (uint64_t));
+  f[0U] = 1ULL;
 }
 
 /**
@@ -83,7 +83,7 @@ Write `a - b mod p` in `out`.
 */
 void Hacl_EC_K256_felem_sub(uint64_t *a, uint64_t *b, uint64_t *out)
 {
-  Hacl_K256_Field_fsub(out, a, b, (uint64_t)2U);
+  Hacl_K256_Field_fsub(out, a, b, 2ULL);
   Hacl_K256_Field_fnormalize_weak(out, out);
 }
 
@@ -189,20 +189,20 @@ Write the base point (generator) in `p`.
 void Hacl_EC_K256_mk_base_point(uint64_t *p)
 {
   uint64_t *gx = p;
-  uint64_t *gy = p + (uint32_t)5U;
-  uint64_t *gz = p + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = p + 5U;
+  uint64_t *gz = p + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
 }
 
 /**
@@ -264,11 +264,11 @@ void Hacl_EC_K256_point_mul(uint8_t *scalar, uint64_t *p, uint64_t *out)
 {
   uint64_t scalar_q[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = scalar_q;
-    uint64_t u = load64_be(scalar + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(scalar + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
   Hacl_Impl_K256_PointMul_point_mul(out, scalar_q, p);
@@ -307,20 +307,20 @@ void Hacl_EC_K256_point_load(uint8_t *b, uint64_t *out)
 {
   uint64_t p_aff[10U] = { 0U };
   uint64_t *px = p_aff;
-  uint64_t *py = p_aff + (uint32_t)5U;
+  uint64_t *py = p_aff + 5U;
   uint8_t *pxb = b;
-  uint8_t *pyb = b + (uint32_t)32U;
+  uint8_t *pyb = b + 32U;
   Hacl_K256_Field_load_felem(px, pxb);
   Hacl_K256_Field_load_felem(py, pyb);
   uint64_t *x = p_aff;
-  uint64_t *y = p_aff + (uint32_t)5U;
+  uint64_t *y = p_aff + 5U;
   uint64_t *x1 = out;
-  uint64_t *y1 = out + (uint32_t)5U;
-  uint64_t *z1 = out + (uint32_t)10U;
-  memcpy(x1, x, (uint32_t)5U * sizeof (uint64_t));
-  memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t));
-  memset(z1, 0U, (uint32_t)5U * sizeof (uint64_t));
-  z1[0U] = (uint64_t)1U;
+  uint64_t *y1 = out + 5U;
+  uint64_t *z1 = out + 10U;
+  memcpy(x1, x, 5U * sizeof (uint64_t));
+  memcpy(y1, y, 5U * sizeof (uint64_t));
+  memset(z1, 0U, 5U * sizeof (uint64_t));
+  z1[0U] = 1ULL;
 }
 
 /**
diff --git a/src/Hacl_Ed25519.c b/src/Hacl_Ed25519.c
index f9881e91..44dc6dba 100644
--- a/src/Hacl_Ed25519.c
+++ b/src/Hacl_Ed25519.c
@@ -49,24 +49,24 @@ void Hacl_Bignum25519_reduce_513(uint64_t *a)
   uint64_t f2 = a[2U];
   uint64_t f3 = a[3U];
   uint64_t f4 = a[4U];
-  uint64_t l_ = f0 + (uint64_t)0U;
-  uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = l_ >> (uint32_t)51U;
+  uint64_t l_ = f0 + 0ULL;
+  uint64_t tmp0 = l_ & 0x7ffffffffffffULL;
+  uint64_t c0 = l_ >> 51U;
   uint64_t l_0 = f1 + c0;
-  uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = l_0 >> (uint32_t)51U;
+  uint64_t tmp1 = l_0 & 0x7ffffffffffffULL;
+  uint64_t c1 = l_0 >> 51U;
   uint64_t l_1 = f2 + c1;
-  uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = l_1 >> (uint32_t)51U;
+  uint64_t tmp2 = l_1 & 0x7ffffffffffffULL;
+  uint64_t c2 = l_1 >> 51U;
   uint64_t l_2 = f3 + c2;
-  uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = l_2 >> (uint32_t)51U;
+  uint64_t tmp3 = l_2 & 0x7ffffffffffffULL;
+  uint64_t c3 = l_2 >> 51U;
   uint64_t l_3 = f4 + c3;
-  uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = l_3 >> (uint32_t)51U;
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = l_3 & 0x7ffffffffffffULL;
+  uint64_t c4 = l_3 >> 51U;
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   a[0U] = tmp0_;
   a[1U] = tmp1 + c5;
   a[2U] = tmp2;
@@ -77,8 +77,8 @@ void Hacl_Bignum25519_reduce_513(uint64_t *a)
 static inline void fmul0(uint64_t *output, uint64_t *input, uint64_t *input2)
 {
   FStar_UInt128_uint128 tmp[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fmul(output, input, input2, tmp);
 }
 
@@ -89,11 +89,11 @@ static inline void times_2(uint64_t *out, uint64_t *a)
   uint64_t a2 = a[2U];
   uint64_t a3 = a[3U];
   uint64_t a4 = a[4U];
-  uint64_t o0 = (uint64_t)2U * a0;
-  uint64_t o1 = (uint64_t)2U * a1;
-  uint64_t o2 = (uint64_t)2U * a2;
-  uint64_t o3 = (uint64_t)2U * a3;
-  uint64_t o4 = (uint64_t)2U * a4;
+  uint64_t o0 = 2ULL * a0;
+  uint64_t o1 = 2ULL * a1;
+  uint64_t o2 = 2ULL * a2;
+  uint64_t o3 = 2ULL * a3;
+  uint64_t o4 = 2ULL * a4;
   out[0U] = o0;
   out[1U] = o1;
   out[2U] = o2;
@@ -104,54 +104,54 @@ static inline void times_2(uint64_t *out, uint64_t *a)
 static inline void times_d(uint64_t *out, uint64_t *a)
 {
   uint64_t d[5U] = { 0U };
-  d[0U] = (uint64_t)0x00034dca135978a3U;
-  d[1U] = (uint64_t)0x0001a8283b156ebdU;
-  d[2U] = (uint64_t)0x0005e7a26001c029U;
-  d[3U] = (uint64_t)0x000739c663a03cbbU;
-  d[4U] = (uint64_t)0x00052036cee2b6ffU;
+  d[0U] = 0x00034dca135978a3ULL;
+  d[1U] = 0x0001a8283b156ebdULL;
+  d[2U] = 0x0005e7a26001c029ULL;
+  d[3U] = 0x000739c663a03cbbULL;
+  d[4U] = 0x00052036cee2b6ffULL;
   fmul0(out, d, a);
 }
 
 static inline void times_2d(uint64_t *out, uint64_t *a)
 {
   uint64_t d2[5U] = { 0U };
-  d2[0U] = (uint64_t)0x00069b9426b2f159U;
-  d2[1U] = (uint64_t)0x00035050762add7aU;
-  d2[2U] = (uint64_t)0x0003cf44c0038052U;
-  d2[3U] = (uint64_t)0x0006738cc7407977U;
-  d2[4U] = (uint64_t)0x0002406d9dc56dffU;
+  d2[0U] = 0x00069b9426b2f159ULL;
+  d2[1U] = 0x00035050762add7aULL;
+  d2[2U] = 0x0003cf44c0038052ULL;
+  d2[3U] = 0x0006738cc7407977ULL;
+  d2[4U] = 0x0002406d9dc56dffULL;
   fmul0(out, d2, a);
 }
 
 static inline void fsquare(uint64_t *out, uint64_t *a)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fsqr(out, a, tmp);
 }
 
 static inline void fsquare_times(uint64_t *output, uint64_t *input, uint32_t count)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_fsquare_times(output, input, tmp, count);
 }
 
 static inline void fsquare_times_inplace(uint64_t *output, uint32_t count)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_fsquare_times(output, output, tmp, count);
 }
 
 void Hacl_Bignum25519_inverse(uint64_t *out, uint64_t *a)
 {
   FStar_UInt128_uint128 tmp[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_finv(out, a, tmp);
 }
 
@@ -162,40 +162,40 @@ static inline void reduce(uint64_t *out)
   uint64_t o2 = out[2U];
   uint64_t o3 = out[3U];
   uint64_t o4 = out[4U];
-  uint64_t l_ = o0 + (uint64_t)0U;
-  uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = l_ >> (uint32_t)51U;
+  uint64_t l_ = o0 + 0ULL;
+  uint64_t tmp0 = l_ & 0x7ffffffffffffULL;
+  uint64_t c0 = l_ >> 51U;
   uint64_t l_0 = o1 + c0;
-  uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = l_0 >> (uint32_t)51U;
+  uint64_t tmp1 = l_0 & 0x7ffffffffffffULL;
+  uint64_t c1 = l_0 >> 51U;
   uint64_t l_1 = o2 + c1;
-  uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = l_1 >> (uint32_t)51U;
+  uint64_t tmp2 = l_1 & 0x7ffffffffffffULL;
+  uint64_t c2 = l_1 >> 51U;
   uint64_t l_2 = o3 + c2;
-  uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = l_2 >> (uint32_t)51U;
+  uint64_t tmp3 = l_2 & 0x7ffffffffffffULL;
+  uint64_t c3 = l_2 >> 51U;
   uint64_t l_3 = o4 + c3;
-  uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = l_3 >> (uint32_t)51U;
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = l_3 & 0x7ffffffffffffULL;
+  uint64_t c4 = l_3 >> 51U;
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t f0 = tmp0_;
   uint64_t f1 = tmp1 + c5;
   uint64_t f2 = tmp2;
   uint64_t f3 = tmp3;
   uint64_t f4 = tmp4;
-  uint64_t m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0x7ffffffffffedU);
-  uint64_t m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0x7ffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0x7ffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7ffffffffffffU);
-  uint64_t m4 = FStar_UInt64_eq_mask(f4, (uint64_t)0x7ffffffffffffU);
+  uint64_t m0 = FStar_UInt64_gte_mask(f0, 0x7ffffffffffedULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(f1, 0x7ffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(f2, 0x7ffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(f3, 0x7ffffffffffffULL);
+  uint64_t m4 = FStar_UInt64_eq_mask(f4, 0x7ffffffffffffULL);
   uint64_t mask = (((m0 & m1) & m2) & m3) & m4;
-  uint64_t f0_ = f0 - (mask & (uint64_t)0x7ffffffffffedU);
-  uint64_t f1_ = f1 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f2_ = f2 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f3_ = f3 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f4_ = f4 - (mask & (uint64_t)0x7ffffffffffffU);
+  uint64_t f0_ = f0 - (mask & 0x7ffffffffffedULL);
+  uint64_t f1_ = f1 - (mask & 0x7ffffffffffffULL);
+  uint64_t f2_ = f2 - (mask & 0x7ffffffffffffULL);
+  uint64_t f3_ = f3 - (mask & 0x7ffffffffffffULL);
+  uint64_t f4_ = f4 - (mask & 0x7ffffffffffffULL);
   uint64_t f01 = f0_;
   uint64_t f11 = f1_;
   uint64_t f21 = f2_;
@@ -212,45 +212,41 @@ void Hacl_Bignum25519_load_51(uint64_t *output, uint8_t *input)
 {
   uint64_t u64s[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = u64s;
-    uint8_t *bj = input + i * (uint32_t)8U;
+    uint8_t *bj = input + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t u64s3 = u64s[3U];
-  u64s[3U] = u64s3 & (uint64_t)0x7fffffffffffffffU;
-  output[0U] = u64s[0U] & (uint64_t)0x7ffffffffffffU;
-  output[1U] = u64s[0U] >> (uint32_t)51U | (u64s[1U] & (uint64_t)0x3fffffffffU) << (uint32_t)13U;
-  output[2U] = u64s[1U] >> (uint32_t)38U | (u64s[2U] & (uint64_t)0x1ffffffU) << (uint32_t)26U;
-  output[3U] = u64s[2U] >> (uint32_t)25U | (u64s[3U] & (uint64_t)0xfffU) << (uint32_t)39U;
-  output[4U] = u64s[3U] >> (uint32_t)12U;
+  u64s[3U] = u64s3 & 0x7fffffffffffffffULL;
+  output[0U] = u64s[0U] & 0x7ffffffffffffULL;
+  output[1U] = u64s[0U] >> 51U | (u64s[1U] & 0x3fffffffffULL) << 13U;
+  output[2U] = u64s[1U] >> 38U | (u64s[2U] & 0x1ffffffULL) << 26U;
+  output[3U] = u64s[2U] >> 25U | (u64s[3U] & 0xfffULL) << 39U;
+  output[4U] = u64s[3U] >> 12U;
 }
 
 void Hacl_Bignum25519_store_51(uint8_t *output, uint64_t *input)
 {
   uint64_t u64s[4U] = { 0U };
   Hacl_Impl_Curve25519_Field51_store_felem(u64s, input);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(output + i * (uint32_t)8U, u64s[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(output + i * 8U, u64s[i]););
 }
 
 void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
 {
   uint64_t tmp[20U] = { 0U };
   uint64_t *tmp1 = tmp;
-  uint64_t *tmp20 = tmp + (uint32_t)5U;
-  uint64_t *tmp30 = tmp + (uint32_t)10U;
-  uint64_t *tmp40 = tmp + (uint32_t)15U;
+  uint64_t *tmp20 = tmp + 5U;
+  uint64_t *tmp30 = tmp + 10U;
+  uint64_t *tmp40 = tmp + 15U;
   uint64_t *x10 = p;
-  uint64_t *y10 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y10 = p + 5U;
+  uint64_t *z1 = p + 10U;
   fsquare(tmp1, x10);
   fsquare(tmp20, y10);
   fsum(tmp30, tmp1, tmp20);
@@ -258,11 +254,11 @@ void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
   fsquare(tmp1, z1);
   times_2(tmp1, tmp1);
   uint64_t *tmp10 = tmp;
-  uint64_t *tmp2 = tmp + (uint32_t)5U;
-  uint64_t *tmp3 = tmp + (uint32_t)10U;
-  uint64_t *tmp4 = tmp + (uint32_t)15U;
+  uint64_t *tmp2 = tmp + 5U;
+  uint64_t *tmp3 = tmp + 10U;
+  uint64_t *tmp4 = tmp + 15U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
+  uint64_t *y1 = p + 5U;
   fsum(tmp2, x1, y1);
   fsquare(tmp2, tmp2);
   Hacl_Bignum25519_reduce_513(tmp3);
@@ -271,13 +267,13 @@ void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
   Hacl_Bignum25519_reduce_513(tmp4);
   fsum(tmp10, tmp10, tmp4);
   uint64_t *tmp_f = tmp;
-  uint64_t *tmp_e = tmp + (uint32_t)5U;
-  uint64_t *tmp_h = tmp + (uint32_t)10U;
-  uint64_t *tmp_g = tmp + (uint32_t)15U;
+  uint64_t *tmp_e = tmp + 5U;
+  uint64_t *tmp_h = tmp + 10U;
+  uint64_t *tmp_g = tmp + 15U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
-  uint64_t *t3 = out + (uint32_t)15U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
+  uint64_t *t3 = out + 15U;
   fmul0(x3, tmp_e, tmp_f);
   fmul0(y3, tmp_g, tmp_h);
   fmul0(t3, tmp_e, tmp_h);
@@ -288,13 +284,13 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
 {
   uint64_t tmp[30U] = { 0U };
   uint64_t *tmp1 = tmp;
-  uint64_t *tmp20 = tmp + (uint32_t)5U;
-  uint64_t *tmp30 = tmp + (uint32_t)10U;
-  uint64_t *tmp40 = tmp + (uint32_t)15U;
+  uint64_t *tmp20 = tmp + 5U;
+  uint64_t *tmp30 = tmp + 10U;
+  uint64_t *tmp40 = tmp + 15U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
+  uint64_t *y1 = p + 5U;
   uint64_t *x2 = q;
-  uint64_t *y2 = q + (uint32_t)5U;
+  uint64_t *y2 = q + 5U;
   fdifference(tmp1, y1, x1);
   fdifference(tmp20, y2, x2);
   fmul0(tmp30, tmp1, tmp20);
@@ -302,15 +298,15 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
   fsum(tmp20, y2, x2);
   fmul0(tmp40, tmp1, tmp20);
   uint64_t *tmp10 = tmp;
-  uint64_t *tmp2 = tmp + (uint32_t)5U;
-  uint64_t *tmp3 = tmp + (uint32_t)10U;
-  uint64_t *tmp4 = tmp + (uint32_t)15U;
-  uint64_t *tmp5 = tmp + (uint32_t)20U;
-  uint64_t *tmp6 = tmp + (uint32_t)25U;
-  uint64_t *z1 = p + (uint32_t)10U;
-  uint64_t *t1 = p + (uint32_t)15U;
-  uint64_t *z2 = q + (uint32_t)10U;
-  uint64_t *t2 = q + (uint32_t)15U;
+  uint64_t *tmp2 = tmp + 5U;
+  uint64_t *tmp3 = tmp + 10U;
+  uint64_t *tmp4 = tmp + 15U;
+  uint64_t *tmp5 = tmp + 20U;
+  uint64_t *tmp6 = tmp + 25U;
+  uint64_t *z1 = p + 10U;
+  uint64_t *t1 = p + 15U;
+  uint64_t *z2 = q + 10U;
+  uint64_t *t2 = q + 15U;
   times_2d(tmp10, t1);
   fmul0(tmp10, tmp10, t2);
   times_2(tmp2, z1);
@@ -320,13 +316,13 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
   fsum(tmp10, tmp2, tmp10);
   fsum(tmp2, tmp4, tmp3);
   uint64_t *tmp_g = tmp;
-  uint64_t *tmp_h = tmp + (uint32_t)5U;
-  uint64_t *tmp_e = tmp + (uint32_t)20U;
-  uint64_t *tmp_f = tmp + (uint32_t)25U;
+  uint64_t *tmp_h = tmp + 5U;
+  uint64_t *tmp_e = tmp + 20U;
+  uint64_t *tmp_f = tmp + 25U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
-  uint64_t *t3 = out + (uint32_t)15U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
+  uint64_t *t3 = out + 15U;
   fmul0(x3, tmp_e, tmp_f);
   fmul0(y3, tmp_g, tmp_h);
   fmul0(t3, tmp_e, tmp_h);
@@ -336,64 +332,64 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
 void Hacl_Impl_Ed25519_PointConstants_make_point_inf(uint64_t *b)
 {
   uint64_t *x = b;
-  uint64_t *y = b + (uint32_t)5U;
-  uint64_t *z = b + (uint32_t)10U;
-  uint64_t *t = b + (uint32_t)15U;
-  x[0U] = (uint64_t)0U;
-  x[1U] = (uint64_t)0U;
-  x[2U] = (uint64_t)0U;
-  x[3U] = (uint64_t)0U;
-  x[4U] = (uint64_t)0U;
-  y[0U] = (uint64_t)1U;
-  y[1U] = (uint64_t)0U;
-  y[2U] = (uint64_t)0U;
-  y[3U] = (uint64_t)0U;
-  y[4U] = (uint64_t)0U;
-  z[0U] = (uint64_t)1U;
-  z[1U] = (uint64_t)0U;
-  z[2U] = (uint64_t)0U;
-  z[3U] = (uint64_t)0U;
-  z[4U] = (uint64_t)0U;
-  t[0U] = (uint64_t)0U;
-  t[1U] = (uint64_t)0U;
-  t[2U] = (uint64_t)0U;
-  t[3U] = (uint64_t)0U;
-  t[4U] = (uint64_t)0U;
+  uint64_t *y = b + 5U;
+  uint64_t *z = b + 10U;
+  uint64_t *t = b + 15U;
+  x[0U] = 0ULL;
+  x[1U] = 0ULL;
+  x[2U] = 0ULL;
+  x[3U] = 0ULL;
+  x[4U] = 0ULL;
+  y[0U] = 1ULL;
+  y[1U] = 0ULL;
+  y[2U] = 0ULL;
+  y[3U] = 0ULL;
+  y[4U] = 0ULL;
+  z[0U] = 1ULL;
+  z[1U] = 0ULL;
+  z[2U] = 0ULL;
+  z[3U] = 0ULL;
+  z[4U] = 0ULL;
+  t[0U] = 0ULL;
+  t[1U] = 0ULL;
+  t[2U] = 0ULL;
+  t[3U] = 0ULL;
+  t[4U] = 0ULL;
 }
 
 static inline void pow2_252m2(uint64_t *out, uint64_t *z)
 {
   uint64_t buf[20U] = { 0U };
   uint64_t *a = buf;
-  uint64_t *t00 = buf + (uint32_t)5U;
-  uint64_t *b0 = buf + (uint32_t)10U;
-  uint64_t *c0 = buf + (uint32_t)15U;
-  fsquare_times(a, z, (uint32_t)1U);
-  fsquare_times(t00, a, (uint32_t)2U);
+  uint64_t *t00 = buf + 5U;
+  uint64_t *b0 = buf + 10U;
+  uint64_t *c0 = buf + 15U;
+  fsquare_times(a, z, 1U);
+  fsquare_times(t00, a, 2U);
   fmul0(b0, t00, z);
   fmul0(a, b0, a);
-  fsquare_times(t00, a, (uint32_t)1U);
+  fsquare_times(t00, a, 1U);
   fmul0(b0, t00, b0);
-  fsquare_times(t00, b0, (uint32_t)5U);
+  fsquare_times(t00, b0, 5U);
   fmul0(b0, t00, b0);
-  fsquare_times(t00, b0, (uint32_t)10U);
+  fsquare_times(t00, b0, 10U);
   fmul0(c0, t00, b0);
-  fsquare_times(t00, c0, (uint32_t)20U);
+  fsquare_times(t00, c0, 20U);
   fmul0(t00, t00, c0);
-  fsquare_times_inplace(t00, (uint32_t)10U);
+  fsquare_times_inplace(t00, 10U);
   fmul0(b0, t00, b0);
-  fsquare_times(t00, b0, (uint32_t)50U);
+  fsquare_times(t00, b0, 50U);
   uint64_t *a0 = buf;
-  uint64_t *t0 = buf + (uint32_t)5U;
-  uint64_t *b = buf + (uint32_t)10U;
-  uint64_t *c = buf + (uint32_t)15U;
-  fsquare_times(a0, z, (uint32_t)1U);
+  uint64_t *t0 = buf + 5U;
+  uint64_t *b = buf + 10U;
+  uint64_t *c = buf + 15U;
+  fsquare_times(a0, z, 1U);
   fmul0(c, t0, b);
-  fsquare_times(t0, c, (uint32_t)100U);
+  fsquare_times(t0, c, 100U);
   fmul0(t0, t0, c);
-  fsquare_times_inplace(t0, (uint32_t)50U);
+  fsquare_times_inplace(t0, 50U);
   fmul0(t0, t0, b);
-  fsquare_times_inplace(t0, (uint32_t)2U);
+  fsquare_times_inplace(t0, 2U);
   fmul0(out, t0, a0);
 }
 
@@ -404,23 +400,17 @@ static inline bool is_0(uint64_t *x)
   uint64_t x2 = x[2U];
   uint64_t x3 = x[3U];
   uint64_t x4 = x[4U];
-  return
-    x0
-    == (uint64_t)0U
-    && x1 == (uint64_t)0U
-    && x2 == (uint64_t)0U
-    && x3 == (uint64_t)0U
-    && x4 == (uint64_t)0U;
+  return x0 == 0ULL && x1 == 0ULL && x2 == 0ULL && x3 == 0ULL && x4 == 0ULL;
 }
 
 static inline void mul_modp_sqrt_m1(uint64_t *x)
 {
   uint64_t sqrt_m1[5U] = { 0U };
-  sqrt_m1[0U] = (uint64_t)0x00061b274a0ea0b0U;
-  sqrt_m1[1U] = (uint64_t)0x0000d5a5fc8f189dU;
-  sqrt_m1[2U] = (uint64_t)0x0007ef5e9cbd0c60U;
-  sqrt_m1[3U] = (uint64_t)0x00078595a6804c9eU;
-  sqrt_m1[4U] = (uint64_t)0x0002b8324804fc1dU;
+  sqrt_m1[0U] = 0x00061b274a0ea0b0ULL;
+  sqrt_m1[1U] = 0x0000d5a5fc8f189dULL;
+  sqrt_m1[2U] = 0x0007ef5e9cbd0c60ULL;
+  sqrt_m1[3U] = 0x00078595a6804c9eULL;
+  sqrt_m1[4U] = 0x0002b8324804fc1dULL;
   fmul0(x, x, sqrt_m1);
 }
 
@@ -436,11 +426,11 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
   bool
   b =
     x00
-    >= (uint64_t)0x7ffffffffffedU
-    && x1 == (uint64_t)0x7ffffffffffffU
-    && x21 == (uint64_t)0x7ffffffffffffU
-    && x30 == (uint64_t)0x7ffffffffffffU
-    && x4 == (uint64_t)0x7ffffffffffffU;
+    >= 0x7ffffffffffedULL
+    && x1 == 0x7ffffffffffffULL
+    && x21 == 0x7ffffffffffffULL
+    && x30 == 0x7ffffffffffffULL
+    && x4 == 0x7ffffffffffffULL;
   bool res;
   if (b)
   {
@@ -450,14 +440,14 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
   {
     uint64_t tmp1[20U] = { 0U };
     uint64_t *one = tmp1;
-    uint64_t *y2 = tmp1 + (uint32_t)5U;
-    uint64_t *dyyi = tmp1 + (uint32_t)10U;
-    uint64_t *dyy = tmp1 + (uint32_t)15U;
-    one[0U] = (uint64_t)1U;
-    one[1U] = (uint64_t)0U;
-    one[2U] = (uint64_t)0U;
-    one[3U] = (uint64_t)0U;
-    one[4U] = (uint64_t)0U;
+    uint64_t *y2 = tmp1 + 5U;
+    uint64_t *dyyi = tmp1 + 10U;
+    uint64_t *dyy = tmp1 + 15U;
+    one[0U] = 1ULL;
+    one[1U] = 0ULL;
+    one[2U] = 0ULL;
+    one[3U] = 0ULL;
+    one[4U] = 0ULL;
     fsquare(y2, y);
     times_d(dyy, y2);
     fsum(dyy, dyy, one);
@@ -470,37 +460,37 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
     uint8_t z;
     if (x2_is_0)
     {
-      if (sign == (uint64_t)0U)
+      if (sign == 0ULL)
       {
-        x[0U] = (uint64_t)0U;
-        x[1U] = (uint64_t)0U;
-        x[2U] = (uint64_t)0U;
-        x[3U] = (uint64_t)0U;
-        x[4U] = (uint64_t)0U;
-        z = (uint8_t)1U;
+        x[0U] = 0ULL;
+        x[1U] = 0ULL;
+        x[2U] = 0ULL;
+        x[3U] = 0ULL;
+        x[4U] = 0ULL;
+        z = 1U;
       }
       else
       {
-        z = (uint8_t)0U;
+        z = 0U;
       }
     }
     else
     {
-      z = (uint8_t)2U;
+      z = 2U;
     }
-    if (z == (uint8_t)0U)
+    if (z == 0U)
     {
       res = false;
     }
-    else if (z == (uint8_t)1U)
+    else if (z == 1U)
     {
       res = true;
     }
     else
     {
       uint64_t *x210 = tmp;
-      uint64_t *x31 = tmp + (uint32_t)5U;
-      uint64_t *t00 = tmp + (uint32_t)10U;
+      uint64_t *x31 = tmp + 5U;
+      uint64_t *t00 = tmp + 10U;
       pow2_252m2(x31, x210);
       fsquare(t00, x31);
       fdifference(t00, t00, x210);
@@ -512,8 +502,8 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
         mul_modp_sqrt_m1(x31);
       }
       uint64_t *x211 = tmp;
-      uint64_t *x3 = tmp + (uint32_t)5U;
-      uint64_t *t01 = tmp + (uint32_t)10U;
+      uint64_t *x3 = tmp + 5U;
+      uint64_t *t01 = tmp + 10U;
       fsquare(t01, x3);
       fdifference(t01, t01, x211);
       Hacl_Bignum25519_reduce_513(t01);
@@ -525,23 +515,23 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
       }
       else
       {
-        uint64_t *x32 = tmp + (uint32_t)5U;
-        uint64_t *t0 = tmp + (uint32_t)10U;
+        uint64_t *x32 = tmp + 5U;
+        uint64_t *t0 = tmp + 10U;
         reduce(x32);
         uint64_t x0 = x32[0U];
-        uint64_t x01 = x0 & (uint64_t)1U;
+        uint64_t x01 = x0 & 1ULL;
         if (!(x01 == sign))
         {
-          t0[0U] = (uint64_t)0U;
-          t0[1U] = (uint64_t)0U;
-          t0[2U] = (uint64_t)0U;
-          t0[3U] = (uint64_t)0U;
-          t0[4U] = (uint64_t)0U;
+          t0[0U] = 0ULL;
+          t0[1U] = 0ULL;
+          t0[2U] = 0ULL;
+          t0[3U] = 0ULL;
+          t0[4U] = 0ULL;
           fdifference(x32, t0, x32);
           Hacl_Bignum25519_reduce_513(x32);
           reduce(x32);
         }
-        memcpy(x, x32, (uint32_t)5U * sizeof (uint64_t));
+        memcpy(x, x32, 5U * sizeof (uint64_t));
         res = true;
       }
     }
@@ -554,9 +544,9 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *
 {
   uint64_t tmp[10U] = { 0U };
   uint64_t *y = tmp;
-  uint64_t *x = tmp + (uint32_t)5U;
+  uint64_t *x = tmp + 5U;
   uint8_t s31 = s[31U];
-  uint8_t z = s31 >> (uint32_t)7U;
+  uint8_t z = (uint32_t)s31 >> 7U;
   uint64_t sign = (uint64_t)z;
   Hacl_Bignum25519_load_51(y, s);
   bool z0 = recover_x(x, y, sign);
@@ -568,16 +558,16 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *
   else
   {
     uint64_t *outx = out;
-    uint64_t *outy = out + (uint32_t)5U;
-    uint64_t *outz = out + (uint32_t)10U;
-    uint64_t *outt = out + (uint32_t)15U;
-    memcpy(outx, x, (uint32_t)5U * sizeof (uint64_t));
-    memcpy(outy, y, (uint32_t)5U * sizeof (uint64_t));
-    outz[0U] = (uint64_t)1U;
-    outz[1U] = (uint64_t)0U;
-    outz[2U] = (uint64_t)0U;
-    outz[3U] = (uint64_t)0U;
-    outz[4U] = (uint64_t)0U;
+    uint64_t *outy = out + 5U;
+    uint64_t *outz = out + 10U;
+    uint64_t *outt = out + 15U;
+    memcpy(outx, x, 5U * sizeof (uint64_t));
+    memcpy(outy, y, 5U * sizeof (uint64_t));
+    outz[0U] = 1ULL;
+    outz[1U] = 0ULL;
+    outz[2U] = 0ULL;
+    outz[3U] = 0ULL;
+    outz[4U] = 0ULL;
     fmul0(outt, x, y);
     res = true;
   }
@@ -588,25 +578,25 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *
 void Hacl_Impl_Ed25519_PointCompress_point_compress(uint8_t *z, uint64_t *p)
 {
   uint64_t tmp[15U] = { 0U };
-  uint64_t *x = tmp + (uint32_t)5U;
-  uint64_t *out = tmp + (uint32_t)10U;
+  uint64_t *x = tmp + 5U;
+  uint64_t *out = tmp + 10U;
   uint64_t *zinv1 = tmp;
-  uint64_t *x1 = tmp + (uint32_t)5U;
-  uint64_t *out1 = tmp + (uint32_t)10U;
+  uint64_t *x1 = tmp + 5U;
+  uint64_t *out1 = tmp + 10U;
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
   Hacl_Bignum25519_inverse(zinv1, pz);
   fmul0(x1, px, zinv1);
   reduce(x1);
   fmul0(out1, py, zinv1);
   Hacl_Bignum25519_reduce_513(out1);
   uint64_t x0 = x[0U];
-  uint64_t b = x0 & (uint64_t)1U;
+  uint64_t b = x0 & 1ULL;
   Hacl_Bignum25519_store_51(z, out);
   uint8_t xbyte = (uint8_t)b;
   uint8_t o31 = z[31U];
-  z[31U] = o31 + (xbyte << (uint32_t)7U);
+  z[31U] = (uint32_t)o31 + ((uint32_t)xbyte << 7U);
 }
 
 static inline void barrett_reduction(uint64_t *z, uint64_t *t)
@@ -621,40 +611,40 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   uint64_t t7 = t[7U];
   uint64_t t8 = t[8U];
   uint64_t t9 = t[9U];
-  uint64_t m00 = (uint64_t)0x12631a5cf5d3edU;
-  uint64_t m10 = (uint64_t)0xf9dea2f79cd658U;
-  uint64_t m20 = (uint64_t)0x000000000014deU;
-  uint64_t m30 = (uint64_t)0x00000000000000U;
-  uint64_t m40 = (uint64_t)0x00000010000000U;
+  uint64_t m00 = 0x12631a5cf5d3edULL;
+  uint64_t m10 = 0xf9dea2f79cd658ULL;
+  uint64_t m20 = 0x000000000014deULL;
+  uint64_t m30 = 0x00000000000000ULL;
+  uint64_t m40 = 0x00000010000000ULL;
   uint64_t m0 = m00;
   uint64_t m1 = m10;
   uint64_t m2 = m20;
   uint64_t m3 = m30;
   uint64_t m4 = m40;
-  uint64_t m010 = (uint64_t)0x9ce5a30a2c131bU;
-  uint64_t m110 = (uint64_t)0x215d086329a7edU;
-  uint64_t m210 = (uint64_t)0xffffffffeb2106U;
-  uint64_t m310 = (uint64_t)0xffffffffffffffU;
-  uint64_t m410 = (uint64_t)0x00000fffffffffU;
+  uint64_t m010 = 0x9ce5a30a2c131bULL;
+  uint64_t m110 = 0x215d086329a7edULL;
+  uint64_t m210 = 0xffffffffeb2106ULL;
+  uint64_t m310 = 0xffffffffffffffULL;
+  uint64_t m410 = 0x00000fffffffffULL;
   uint64_t mu0 = m010;
   uint64_t mu1 = m110;
   uint64_t mu2 = m210;
   uint64_t mu3 = m310;
   uint64_t mu4 = m410;
-  uint64_t y_ = (t5 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_ = t4 >> (uint32_t)24U;
+  uint64_t y_ = (t5 & 0xffffffULL) << 32U;
+  uint64_t x_ = t4 >> 24U;
   uint64_t z00 = x_ | y_;
-  uint64_t y_0 = (t6 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_0 = t5 >> (uint32_t)24U;
+  uint64_t y_0 = (t6 & 0xffffffULL) << 32U;
+  uint64_t x_0 = t5 >> 24U;
   uint64_t z10 = x_0 | y_0;
-  uint64_t y_1 = (t7 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_1 = t6 >> (uint32_t)24U;
+  uint64_t y_1 = (t7 & 0xffffffULL) << 32U;
+  uint64_t x_1 = t6 >> 24U;
   uint64_t z20 = x_1 | y_1;
-  uint64_t y_2 = (t8 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_2 = t7 >> (uint32_t)24U;
+  uint64_t y_2 = (t8 & 0xffffffULL) << 32U;
+  uint64_t x_2 = t7 >> 24U;
   uint64_t z30 = x_2 | y_2;
-  uint64_t y_3 = (t9 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_3 = t8 >> (uint32_t)24U;
+  uint64_t y_3 = (t9 & 0xffffffULL) << 32U;
+  uint64_t x_3 = t8 >> 24U;
   uint64_t z40 = x_3 | y_3;
   uint64_t q0 = z00;
   uint64_t q1 = z10;
@@ -707,55 +697,37 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   FStar_UInt128_uint128 z6 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy24, xy33), xy42);
   FStar_UInt128_uint128 z7 = FStar_UInt128_add_mod(xy34, xy43);
   FStar_UInt128_uint128 z8 = xy44;
-  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z01, (uint32_t)56U);
+  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z01, 56U);
   FStar_UInt128_uint128 c00 = carry0;
-  FStar_UInt128_uint128
-  carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z11, c00), (uint32_t)56U);
+  FStar_UInt128_uint128 carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z11, c00), 56U);
   FStar_UInt128_uint128 c10 = carry1;
-  FStar_UInt128_uint128
-  carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z21, c10), (uint32_t)56U);
+  FStar_UInt128_uint128 carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z21, c10), 56U);
   FStar_UInt128_uint128 c20 = carry2;
-  FStar_UInt128_uint128
-  carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z31, c20), (uint32_t)56U);
+  FStar_UInt128_uint128 carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z31, c20), 56U);
   FStar_UInt128_uint128 c30 = carry3;
-  FStar_UInt128_uint128
-  carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z41, c30), (uint32_t)56U);
+  FStar_UInt128_uint128 carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z41, c30), 56U);
   uint64_t
-  t100 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z41, c30))
-    & (uint64_t)0xffffffffffffffU;
+  t100 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z41, c30)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c40 = carry4;
   uint64_t t410 = t100;
-  FStar_UInt128_uint128
-  carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z5, c40), (uint32_t)56U);
+  FStar_UInt128_uint128 carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z5, c40), 56U);
   uint64_t
-  t101 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z5, c40))
-    & (uint64_t)0xffffffffffffffU;
+  t101 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z5, c40)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c5 = carry5;
   uint64_t t51 = t101;
-  FStar_UInt128_uint128
-  carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z6, c5), (uint32_t)56U);
+  FStar_UInt128_uint128 carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z6, c5), 56U);
   uint64_t
-  t102 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z6, c5))
-    & (uint64_t)0xffffffffffffffU;
+  t102 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z6, c5)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c6 = carry6;
   uint64_t t61 = t102;
-  FStar_UInt128_uint128
-  carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z7, c6), (uint32_t)56U);
+  FStar_UInt128_uint128 carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z7, c6), 56U);
   uint64_t
-  t103 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z7, c6))
-    & (uint64_t)0xffffffffffffffU;
+  t103 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z7, c6)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c7 = carry7;
   uint64_t t71 = t103;
-  FStar_UInt128_uint128
-  carry8 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z8, c7), (uint32_t)56U);
+  FStar_UInt128_uint128 carry8 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z8, c7), 56U);
   uint64_t
-  t104 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z8, c7))
-    & (uint64_t)0xffffffffffffffU;
+  t104 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z8, c7)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c8 = carry8;
   uint64_t t81 = t104;
   uint64_t t91 = FStar_UInt128_uint128_to_uint64(c8);
@@ -765,20 +737,20 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   uint64_t qmu7_ = t71;
   uint64_t qmu8_ = t81;
   uint64_t qmu9_ = t91;
-  uint64_t y_4 = (qmu5_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_4 = qmu4_ >> (uint32_t)40U;
+  uint64_t y_4 = (qmu5_ & 0xffffffffffULL) << 16U;
+  uint64_t x_4 = qmu4_ >> 40U;
   uint64_t z02 = x_4 | y_4;
-  uint64_t y_5 = (qmu6_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_5 = qmu5_ >> (uint32_t)40U;
+  uint64_t y_5 = (qmu6_ & 0xffffffffffULL) << 16U;
+  uint64_t x_5 = qmu5_ >> 40U;
   uint64_t z12 = x_5 | y_5;
-  uint64_t y_6 = (qmu7_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_6 = qmu6_ >> (uint32_t)40U;
+  uint64_t y_6 = (qmu7_ & 0xffffffffffULL) << 16U;
+  uint64_t x_6 = qmu6_ >> 40U;
   uint64_t z22 = x_6 | y_6;
-  uint64_t y_7 = (qmu8_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_7 = qmu7_ >> (uint32_t)40U;
+  uint64_t y_7 = (qmu8_ & 0xffffffffffULL) << 16U;
+  uint64_t x_7 = qmu7_ >> 40U;
   uint64_t z32 = x_7 | y_7;
-  uint64_t y_8 = (qmu9_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_8 = qmu8_ >> (uint32_t)40U;
+  uint64_t y_8 = (qmu9_ & 0xffffffffffULL) << 16U;
+  uint64_t x_8 = qmu8_ >> 40U;
   uint64_t z42 = x_8 | y_8;
   uint64_t qdiv0 = z02;
   uint64_t qdiv1 = z12;
@@ -789,7 +761,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   uint64_t r1 = t1;
   uint64_t r2 = t2;
   uint64_t r3 = t3;
-  uint64_t r4 = t4 & (uint64_t)0xffffffffffU;
+  uint64_t r4 = t4 & 0xffffffffffULL;
   FStar_UInt128_uint128 xy00 = FStar_UInt128_mul_wide(qdiv0, m0);
   FStar_UInt128_uint128 xy01 = FStar_UInt128_mul_wide(qdiv0, m1);
   FStar_UInt128_uint128 xy02 = FStar_UInt128_mul_wide(qdiv0, m2);
@@ -805,18 +777,18 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   FStar_UInt128_uint128 xy30 = FStar_UInt128_mul_wide(qdiv3, m0);
   FStar_UInt128_uint128 xy31 = FStar_UInt128_mul_wide(qdiv3, m1);
   FStar_UInt128_uint128 xy40 = FStar_UInt128_mul_wide(qdiv4, m0);
-  FStar_UInt128_uint128 carry9 = FStar_UInt128_shift_right(xy00, (uint32_t)56U);
-  uint64_t t105 = FStar_UInt128_uint128_to_uint64(xy00) & (uint64_t)0xffffffffffffffU;
+  FStar_UInt128_uint128 carry9 = FStar_UInt128_shift_right(xy00, 56U);
+  uint64_t t105 = FStar_UInt128_uint128_to_uint64(xy00) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c0 = carry9;
   uint64_t t010 = t105;
   FStar_UInt128_uint128
   carry10 =
     FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0),
-      (uint32_t)56U);
+      56U);
   uint64_t
   t106 =
     FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0))
-    & (uint64_t)0xffffffffffffffU;
+    & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c11 = carry10;
   uint64_t t110 = t106;
   FStar_UInt128_uint128
@@ -825,14 +797,14 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy11),
           xy20),
         c11),
-      (uint32_t)56U);
+      56U);
   uint64_t
   t107 =
     FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02,
             xy11),
           xy20),
         c11))
-    & (uint64_t)0xffffffffffffffU;
+    & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c21 = carry11;
   uint64_t t210 = t107;
   FStar_UInt128_uint128
@@ -842,7 +814,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy21),
           xy30),
         c21),
-      (uint32_t)56U);
+      56U);
   uint64_t
   t108 =
     FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03,
@@ -850,7 +822,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy21),
           xy30),
         c21))
-    & (uint64_t)0xffffffffffffffU;
+    & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c31 = carry;
   uint64_t t310 = t108;
   uint64_t
@@ -861,67 +833,67 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy31),
           xy40),
         c31))
-    & (uint64_t)0xffffffffffU;
+    & 0xffffffffffULL;
   uint64_t qmul0 = t010;
   uint64_t qmul1 = t110;
   uint64_t qmul2 = t210;
   uint64_t qmul3 = t310;
   uint64_t qmul4 = t411;
-  uint64_t b5 = (r0 - qmul0) >> (uint32_t)63U;
-  uint64_t t109 = (b5 << (uint32_t)56U) + r0 - qmul0;
+  uint64_t b5 = (r0 - qmul0) >> 63U;
+  uint64_t t109 = (b5 << 56U) + r0 - qmul0;
   uint64_t c1 = b5;
   uint64_t t011 = t109;
-  uint64_t b6 = (r1 - (qmul1 + c1)) >> (uint32_t)63U;
-  uint64_t t1010 = (b6 << (uint32_t)56U) + r1 - (qmul1 + c1);
+  uint64_t b6 = (r1 - (qmul1 + c1)) >> 63U;
+  uint64_t t1010 = (b6 << 56U) + r1 - (qmul1 + c1);
   uint64_t c2 = b6;
   uint64_t t111 = t1010;
-  uint64_t b7 = (r2 - (qmul2 + c2)) >> (uint32_t)63U;
-  uint64_t t1011 = (b7 << (uint32_t)56U) + r2 - (qmul2 + c2);
+  uint64_t b7 = (r2 - (qmul2 + c2)) >> 63U;
+  uint64_t t1011 = (b7 << 56U) + r2 - (qmul2 + c2);
   uint64_t c3 = b7;
   uint64_t t211 = t1011;
-  uint64_t b8 = (r3 - (qmul3 + c3)) >> (uint32_t)63U;
-  uint64_t t1012 = (b8 << (uint32_t)56U) + r3 - (qmul3 + c3);
+  uint64_t b8 = (r3 - (qmul3 + c3)) >> 63U;
+  uint64_t t1012 = (b8 << 56U) + r3 - (qmul3 + c3);
   uint64_t c4 = b8;
   uint64_t t311 = t1012;
-  uint64_t b9 = (r4 - (qmul4 + c4)) >> (uint32_t)63U;
-  uint64_t t1013 = (b9 << (uint32_t)40U) + r4 - (qmul4 + c4);
+  uint64_t b9 = (r4 - (qmul4 + c4)) >> 63U;
+  uint64_t t1013 = (b9 << 40U) + r4 - (qmul4 + c4);
   uint64_t t412 = t1013;
   uint64_t s0 = t011;
   uint64_t s1 = t111;
   uint64_t s2 = t211;
   uint64_t s3 = t311;
   uint64_t s4 = t412;
-  uint64_t m01 = (uint64_t)0x12631a5cf5d3edU;
-  uint64_t m11 = (uint64_t)0xf9dea2f79cd658U;
-  uint64_t m21 = (uint64_t)0x000000000014deU;
-  uint64_t m31 = (uint64_t)0x00000000000000U;
-  uint64_t m41 = (uint64_t)0x00000010000000U;
+  uint64_t m01 = 0x12631a5cf5d3edULL;
+  uint64_t m11 = 0xf9dea2f79cd658ULL;
+  uint64_t m21 = 0x000000000014deULL;
+  uint64_t m31 = 0x00000000000000ULL;
+  uint64_t m41 = 0x00000010000000ULL;
   uint64_t y0 = m01;
   uint64_t y1 = m11;
   uint64_t y2 = m21;
   uint64_t y3 = m31;
   uint64_t y4 = m41;
-  uint64_t b10 = (s0 - y0) >> (uint32_t)63U;
-  uint64_t t1014 = (b10 << (uint32_t)56U) + s0 - y0;
+  uint64_t b10 = (s0 - y0) >> 63U;
+  uint64_t t1014 = (b10 << 56U) + s0 - y0;
   uint64_t b0 = b10;
   uint64_t t01 = t1014;
-  uint64_t b11 = (s1 - (y1 + b0)) >> (uint32_t)63U;
-  uint64_t t1015 = (b11 << (uint32_t)56U) + s1 - (y1 + b0);
+  uint64_t b11 = (s1 - (y1 + b0)) >> 63U;
+  uint64_t t1015 = (b11 << 56U) + s1 - (y1 + b0);
   uint64_t b1 = b11;
   uint64_t t11 = t1015;
-  uint64_t b12 = (s2 - (y2 + b1)) >> (uint32_t)63U;
-  uint64_t t1016 = (b12 << (uint32_t)56U) + s2 - (y2 + b1);
+  uint64_t b12 = (s2 - (y2 + b1)) >> 63U;
+  uint64_t t1016 = (b12 << 56U) + s2 - (y2 + b1);
   uint64_t b2 = b12;
   uint64_t t21 = t1016;
-  uint64_t b13 = (s3 - (y3 + b2)) >> (uint32_t)63U;
-  uint64_t t1017 = (b13 << (uint32_t)56U) + s3 - (y3 + b2);
+  uint64_t b13 = (s3 - (y3 + b2)) >> 63U;
+  uint64_t t1017 = (b13 << 56U) + s3 - (y3 + b2);
   uint64_t b3 = b13;
   uint64_t t31 = t1017;
-  uint64_t b = (s4 - (y4 + b3)) >> (uint32_t)63U;
-  uint64_t t10 = (b << (uint32_t)56U) + s4 - (y4 + b3);
+  uint64_t b = (s4 - (y4 + b3)) >> 63U;
+  uint64_t t10 = (b << 56U) + s4 - (y4 + b3);
   uint64_t b4 = b;
   uint64_t t41 = t10;
-  uint64_t mask = b4 - (uint64_t)1U;
+  uint64_t mask = b4 - 1ULL;
   uint64_t z03 = s0 ^ (mask & (s0 ^ t01));
   uint64_t z13 = s1 ^ (mask & (s1 ^ t11));
   uint64_t z23 = s2 ^ (mask & (s2 ^ t21));
@@ -1008,72 +980,48 @@ static inline void mul_modq(uint64_t *out, uint64_t *x, uint64_t *y)
   FStar_UInt128_uint128 z60 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy24, xy33), xy42);
   FStar_UInt128_uint128 z70 = FStar_UInt128_add_mod(xy34, xy43);
   FStar_UInt128_uint128 z80 = xy44;
-  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z00, (uint32_t)56U);
-  uint64_t t10 = FStar_UInt128_uint128_to_uint64(z00) & (uint64_t)0xffffffffffffffU;
+  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z00, 56U);
+  uint64_t t10 = FStar_UInt128_uint128_to_uint64(z00) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c0 = carry0;
   uint64_t t0 = t10;
-  FStar_UInt128_uint128
-  carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z10, c0), (uint32_t)56U);
+  FStar_UInt128_uint128 carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z10, c0), 56U);
   uint64_t
-  t11 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z10, c0))
-    & (uint64_t)0xffffffffffffffU;
+  t11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z10, c0)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c1 = carry1;
   uint64_t t1 = t11;
-  FStar_UInt128_uint128
-  carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z20, c1), (uint32_t)56U);
+  FStar_UInt128_uint128 carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z20, c1), 56U);
   uint64_t
-  t12 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z20, c1))
-    & (uint64_t)0xffffffffffffffU;
+  t12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z20, c1)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c2 = carry2;
   uint64_t t2 = t12;
-  FStar_UInt128_uint128
-  carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z30, c2), (uint32_t)56U);
+  FStar_UInt128_uint128 carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z30, c2), 56U);
   uint64_t
-  t13 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z30, c2))
-    & (uint64_t)0xffffffffffffffU;
+  t13 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z30, c2)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c3 = carry3;
   uint64_t t3 = t13;
-  FStar_UInt128_uint128
-  carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z40, c3), (uint32_t)56U);
+  FStar_UInt128_uint128 carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z40, c3), 56U);
   uint64_t
-  t14 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z40, c3))
-    & (uint64_t)0xffffffffffffffU;
+  t14 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z40, c3)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c4 = carry4;
   uint64_t t4 = t14;
-  FStar_UInt128_uint128
-  carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z50, c4), (uint32_t)56U);
+  FStar_UInt128_uint128 carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z50, c4), 56U);
   uint64_t
-  t15 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z50, c4))
-    & (uint64_t)0xffffffffffffffU;
+  t15 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z50, c4)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c5 = carry5;
   uint64_t t5 = t15;
-  FStar_UInt128_uint128
-  carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z60, c5), (uint32_t)56U);
+  FStar_UInt128_uint128 carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z60, c5), 56U);
   uint64_t
-  t16 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z60, c5))
-    & (uint64_t)0xffffffffffffffU;
+  t16 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z60, c5)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c6 = carry6;
   uint64_t t6 = t16;
-  FStar_UInt128_uint128
-  carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z70, c6), (uint32_t)56U);
+  FStar_UInt128_uint128 carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z70, c6), 56U);
   uint64_t
-  t17 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z70, c6))
-    & (uint64_t)0xffffffffffffffU;
+  t17 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z70, c6)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c7 = carry7;
   uint64_t t7 = t17;
-  FStar_UInt128_uint128
-  carry = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z80, c7), (uint32_t)56U);
+  FStar_UInt128_uint128 carry = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z80, c7), 56U);
   uint64_t
-  t =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z80, c7))
-    & (uint64_t)0xffffffffffffffU;
+  t = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z80, c7)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c8 = carry;
   uint64_t t8 = t;
   uint64_t t9 = FStar_UInt128_uint128_to_uint64(c8);
@@ -1112,54 +1060,54 @@ static inline void add_modq(uint64_t *out, uint64_t *x, uint64_t *y)
   uint64_t y2 = y[2U];
   uint64_t y3 = y[3U];
   uint64_t y4 = y[4U];
-  uint64_t carry0 = (x0 + y0) >> (uint32_t)56U;
-  uint64_t t0 = (x0 + y0) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry0 = (x0 + y0) >> 56U;
+  uint64_t t0 = (x0 + y0) & 0xffffffffffffffULL;
   uint64_t t00 = t0;
   uint64_t c0 = carry0;
-  uint64_t carry1 = (x1 + y1 + c0) >> (uint32_t)56U;
-  uint64_t t1 = (x1 + y1 + c0) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry1 = (x1 + y1 + c0) >> 56U;
+  uint64_t t1 = (x1 + y1 + c0) & 0xffffffffffffffULL;
   uint64_t t10 = t1;
   uint64_t c1 = carry1;
-  uint64_t carry2 = (x2 + y2 + c1) >> (uint32_t)56U;
-  uint64_t t2 = (x2 + y2 + c1) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry2 = (x2 + y2 + c1) >> 56U;
+  uint64_t t2 = (x2 + y2 + c1) & 0xffffffffffffffULL;
   uint64_t t20 = t2;
   uint64_t c2 = carry2;
-  uint64_t carry = (x3 + y3 + c2) >> (uint32_t)56U;
-  uint64_t t3 = (x3 + y3 + c2) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry = (x3 + y3 + c2) >> 56U;
+  uint64_t t3 = (x3 + y3 + c2) & 0xffffffffffffffULL;
   uint64_t t30 = t3;
   uint64_t c3 = carry;
   uint64_t t4 = x4 + y4 + c3;
-  uint64_t m0 = (uint64_t)0x12631a5cf5d3edU;
-  uint64_t m1 = (uint64_t)0xf9dea2f79cd658U;
-  uint64_t m2 = (uint64_t)0x000000000014deU;
-  uint64_t m3 = (uint64_t)0x00000000000000U;
-  uint64_t m4 = (uint64_t)0x00000010000000U;
+  uint64_t m0 = 0x12631a5cf5d3edULL;
+  uint64_t m1 = 0xf9dea2f79cd658ULL;
+  uint64_t m2 = 0x000000000014deULL;
+  uint64_t m3 = 0x00000000000000ULL;
+  uint64_t m4 = 0x00000010000000ULL;
   uint64_t y01 = m0;
   uint64_t y11 = m1;
   uint64_t y21 = m2;
   uint64_t y31 = m3;
   uint64_t y41 = m4;
-  uint64_t b5 = (t00 - y01) >> (uint32_t)63U;
-  uint64_t t5 = (b5 << (uint32_t)56U) + t00 - y01;
+  uint64_t b5 = (t00 - y01) >> 63U;
+  uint64_t t5 = (b5 << 56U) + t00 - y01;
   uint64_t b0 = b5;
   uint64_t t01 = t5;
-  uint64_t b6 = (t10 - (y11 + b0)) >> (uint32_t)63U;
-  uint64_t t6 = (b6 << (uint32_t)56U) + t10 - (y11 + b0);
+  uint64_t b6 = (t10 - (y11 + b0)) >> 63U;
+  uint64_t t6 = (b6 << 56U) + t10 - (y11 + b0);
   uint64_t b1 = b6;
   uint64_t t11 = t6;
-  uint64_t b7 = (t20 - (y21 + b1)) >> (uint32_t)63U;
-  uint64_t t7 = (b7 << (uint32_t)56U) + t20 - (y21 + b1);
+  uint64_t b7 = (t20 - (y21 + b1)) >> 63U;
+  uint64_t t7 = (b7 << 56U) + t20 - (y21 + b1);
   uint64_t b2 = b7;
   uint64_t t21 = t7;
-  uint64_t b8 = (t30 - (y31 + b2)) >> (uint32_t)63U;
-  uint64_t t8 = (b8 << (uint32_t)56U) + t30 - (y31 + b2);
+  uint64_t b8 = (t30 - (y31 + b2)) >> 63U;
+  uint64_t t8 = (b8 << 56U) + t30 - (y31 + b2);
   uint64_t b3 = b8;
   uint64_t t31 = t8;
-  uint64_t b = (t4 - (y41 + b3)) >> (uint32_t)63U;
-  uint64_t t = (b << (uint32_t)56U) + t4 - (y41 + b3);
+  uint64_t b = (t4 - (y41 + b3)) >> 63U;
+  uint64_t t = (b << 56U) + t4 - (y41 + b3);
   uint64_t b4 = b;
   uint64_t t41 = t;
-  uint64_t mask = b4 - (uint64_t)1U;
+  uint64_t mask = b4 - 1ULL;
   uint64_t z00 = t00 ^ (mask & (t00 ^ t01));
   uint64_t z10 = t10 ^ (mask & (t10 ^ t11));
   uint64_t z20 = t20 ^ (mask & (t20 ^ t21));
@@ -1194,35 +1142,35 @@ static inline bool gte_q(uint64_t *s)
   uint64_t s2 = s[2U];
   uint64_t s3 = s[3U];
   uint64_t s4 = s[4U];
-  if (s4 > (uint64_t)0x00000010000000U)
+  if (s4 > 0x00000010000000ULL)
   {
     return true;
   }
-  if (s4 < (uint64_t)0x00000010000000U)
+  if (s4 < 0x00000010000000ULL)
   {
     return false;
   }
-  if (s3 > (uint64_t)0x00000000000000U)
+  if (s3 > 0x00000000000000ULL)
   {
     return true;
   }
-  if (s2 > (uint64_t)0x000000000014deU)
+  if (s2 > 0x000000000014deULL)
   {
     return true;
   }
-  if (s2 < (uint64_t)0x000000000014deU)
+  if (s2 < 0x000000000014deULL)
   {
     return false;
   }
-  if (s1 > (uint64_t)0xf9dea2f79cd658U)
+  if (s1 > 0xf9dea2f79cd658ULL)
   {
     return true;
   }
-  if (s1 < (uint64_t)0xf9dea2f79cd658U)
+  if (s1 < 0xf9dea2f79cd658ULL)
   {
     return false;
   }
-  if (s0 >= (uint64_t)0x12631a5cf5d3edU)
+  if (s0 >= 0x12631a5cf5d3edULL)
   {
     return true;
   }
@@ -1248,19 +1196,19 @@ bool Hacl_Impl_Ed25519_PointEqual_point_equal(uint64_t *p, uint64_t *q)
 {
   uint64_t tmp[20U] = { 0U };
   uint64_t *pxqz = tmp;
-  uint64_t *qxpz = tmp + (uint32_t)5U;
-  fmul0(pxqz, p, q + (uint32_t)10U);
+  uint64_t *qxpz = tmp + 5U;
+  fmul0(pxqz, p, q + 10U);
   reduce(pxqz);
-  fmul0(qxpz, q, p + (uint32_t)10U);
+  fmul0(qxpz, q, p + 10U);
   reduce(qxpz);
   bool b = eq(pxqz, qxpz);
   if (b)
   {
-    uint64_t *pyqz = tmp + (uint32_t)10U;
-    uint64_t *qypz = tmp + (uint32_t)15U;
-    fmul0(pyqz, p + (uint32_t)5U, q + (uint32_t)10U);
+    uint64_t *pyqz = tmp + 10U;
+    uint64_t *qypz = tmp + 15U;
+    fmul0(pyqz, p + 5U, q + 10U);
     reduce(pyqz);
-    fmul0(qypz, q + (uint32_t)5U, p + (uint32_t)10U);
+    fmul0(qypz, q + 5U, p + 10U);
     reduce(qypz);
     return eq(pyqz, qypz);
   }
@@ -1270,23 +1218,23 @@ bool Hacl_Impl_Ed25519_PointEqual_point_equal(uint64_t *p, uint64_t *q)
 void Hacl_Impl_Ed25519_PointNegate_point_negate(uint64_t *p, uint64_t *out)
 {
   uint64_t zero[5U] = { 0U };
-  zero[0U] = (uint64_t)0U;
-  zero[1U] = (uint64_t)0U;
-  zero[2U] = (uint64_t)0U;
-  zero[3U] = (uint64_t)0U;
-  zero[4U] = (uint64_t)0U;
+  zero[0U] = 0ULL;
+  zero[1U] = 0ULL;
+  zero[2U] = 0ULL;
+  zero[3U] = 0ULL;
+  zero[4U] = 0ULL;
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)5U;
-  uint64_t *z = p + (uint32_t)10U;
-  uint64_t *t = p + (uint32_t)15U;
+  uint64_t *y = p + 5U;
+  uint64_t *z = p + 10U;
+  uint64_t *t = p + 15U;
   uint64_t *x1 = out;
-  uint64_t *y1 = out + (uint32_t)5U;
-  uint64_t *z1 = out + (uint32_t)10U;
-  uint64_t *t1 = out + (uint32_t)15U;
+  uint64_t *y1 = out + 5U;
+  uint64_t *z1 = out + 10U;
+  uint64_t *t1 = out + 15U;
   fdifference(x1, zero, x);
   Hacl_Bignum25519_reduce_513(x1);
-  memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t));
-  memcpy(z1, z, (uint32_t)5U * sizeof (uint64_t));
+  memcpy(y1, y, 5U * sizeof (uint64_t));
+  memcpy(z1, z, 5U * sizeof (uint64_t));
   fdifference(t1, zero, t);
   Hacl_Bignum25519_reduce_513(t1);
 }
@@ -1295,11 +1243,11 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t
 {
   uint64_t bscalar[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar;
-    uint8_t *bj = scalar + i * (uint32_t)8U;
+    uint8_t *bj = scalar + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -1307,42 +1255,34 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t
   uint64_t table[320U] = { 0U };
   uint64_t tmp[20U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)20U;
+  uint64_t *t1 = table + 20U;
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(t0);
-  memcpy(t1, q, (uint32_t)20U * sizeof (uint64_t));
+  memcpy(t1, q, 20U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)20U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 20U;
     Hacl_Impl_Ed25519_PointDouble_point_double(tmp, t11);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U,
-      tmp,
-      (uint32_t)20U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U;
+    memcpy(table + (2U * i + 2U) * 20U, tmp, 20U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 20U;
     Hacl_Impl_Ed25519_PointAdd_point_add(tmp, q, t2);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U,
-      tmp,
-      (uint32_t)20U * sizeof (uint64_t)););
+    memcpy(table + (2U * i + 3U) * 20U, tmp, 20U * sizeof (uint64_t)););
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(out);
   uint64_t tmp0[20U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)20U * sizeof (uint64_t));
+    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
+    uint32_t k = 256U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 20U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)20U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)20U; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 20U;
+      for (uint32_t i = 0U; i < 20U; i++)
       {
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -1354,14 +1294,14 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t
 
 static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
 {
-  memcpy(tmp, (uint64_t *)table, (uint32_t)20U * sizeof (uint64_t));
+  memcpy(tmp, (uint64_t *)table, 20U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i0,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
-    const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)20U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)20U; i++)
+    0U,
+    15U,
+    1U,
+    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + 1U));
+    const uint64_t *res_j = table + (i0 + 1U) * 20U;
+    for (uint32_t i = 0U; i < 20U; i++)
     {
       uint64_t *os = tmp;
       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
@@ -1373,107 +1313,97 @@ static inline void point_mul_g(uint64_t *out, uint8_t *scalar)
 {
   uint64_t bscalar[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar;
-    uint8_t *bj = scalar + i * (uint32_t)8U;
+    uint8_t *bj = scalar + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t q1[20U] = { 0U };
   uint64_t *gx = q1;
-  uint64_t *gy = q1 + (uint32_t)5U;
-  uint64_t *gz = q1 + (uint32_t)10U;
-  uint64_t *gt = q1 + (uint32_t)15U;
-  gx[0U] = (uint64_t)0x00062d608f25d51aU;
-  gx[1U] = (uint64_t)0x000412a4b4f6592aU;
-  gx[2U] = (uint64_t)0x00075b7171a4b31dU;
-  gx[3U] = (uint64_t)0x0001ff60527118feU;
-  gx[4U] = (uint64_t)0x000216936d3cd6e5U;
-  gy[0U] = (uint64_t)0x0006666666666658U;
-  gy[1U] = (uint64_t)0x0004ccccccccccccU;
-  gy[2U] = (uint64_t)0x0001999999999999U;
-  gy[3U] = (uint64_t)0x0003333333333333U;
-  gy[4U] = (uint64_t)0x0006666666666666U;
-  gz[0U] = (uint64_t)1U;
-  gz[1U] = (uint64_t)0U;
-  gz[2U] = (uint64_t)0U;
-  gz[3U] = (uint64_t)0U;
-  gz[4U] = (uint64_t)0U;
-  gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
-  gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
-  gt[2U] = (uint64_t)0x0002af8df483c27eU;
-  gt[3U] = (uint64_t)0x000332b375274732U;
-  gt[4U] = (uint64_t)0x00067875f0fd78b7U;
+  uint64_t *gy = q1 + 5U;
+  uint64_t *gz = q1 + 10U;
+  uint64_t *gt = q1 + 15U;
+  gx[0U] = 0x00062d608f25d51aULL;
+  gx[1U] = 0x000412a4b4f6592aULL;
+  gx[2U] = 0x00075b7171a4b31dULL;
+  gx[3U] = 0x0001ff60527118feULL;
+  gx[4U] = 0x000216936d3cd6e5ULL;
+  gy[0U] = 0x0006666666666658ULL;
+  gy[1U] = 0x0004ccccccccccccULL;
+  gy[2U] = 0x0001999999999999ULL;
+  gy[3U] = 0x0003333333333333ULL;
+  gy[4U] = 0x0006666666666666ULL;
+  gz[0U] = 1ULL;
+  gz[1U] = 0ULL;
+  gz[2U] = 0ULL;
+  gz[3U] = 0ULL;
+  gz[4U] = 0ULL;
+  gt[0U] = 0x00068ab3a5b7dda3ULL;
+  gt[1U] = 0x00000eea2a5eadbbULL;
+  gt[2U] = 0x0002af8df483c27eULL;
+  gt[3U] = 0x000332b375274732ULL;
+  gt[4U] = 0x00067875f0fd78b7ULL;
   uint64_t
   q2[20U] =
     {
-      (uint64_t)13559344787725U, (uint64_t)2051621493703448U, (uint64_t)1947659315640708U,
-      (uint64_t)626856790370168U, (uint64_t)1592804284034836U, (uint64_t)1781728767459187U,
-      (uint64_t)278818420518009U, (uint64_t)2038030359908351U, (uint64_t)910625973862690U,
-      (uint64_t)471887343142239U, (uint64_t)1298543306606048U, (uint64_t)794147365642417U,
-      (uint64_t)129968992326749U, (uint64_t)523140861678572U, (uint64_t)1166419653909231U,
-      (uint64_t)2009637196928390U, (uint64_t)1288020222395193U, (uint64_t)1007046974985829U,
-      (uint64_t)208981102651386U, (uint64_t)2074009315253380U
+      13559344787725ULL, 2051621493703448ULL, 1947659315640708ULL, 626856790370168ULL,
+      1592804284034836ULL, 1781728767459187ULL, 278818420518009ULL, 2038030359908351ULL,
+      910625973862690ULL, 471887343142239ULL, 1298543306606048ULL, 794147365642417ULL,
+      129968992326749ULL, 523140861678572ULL, 1166419653909231ULL, 2009637196928390ULL,
+      1288020222395193ULL, 1007046974985829ULL, 208981102651386ULL, 2074009315253380ULL
     };
   uint64_t
   q3[20U] =
     {
-      (uint64_t)557549315715710U, (uint64_t)196756086293855U, (uint64_t)846062225082495U,
-      (uint64_t)1865068224838092U, (uint64_t)991112090754908U, (uint64_t)522916421512828U,
-      (uint64_t)2098523346722375U, (uint64_t)1135633221747012U, (uint64_t)858420432114866U,
-      (uint64_t)186358544306082U, (uint64_t)1044420411868480U, (uint64_t)2080052304349321U,
-      (uint64_t)557301814716724U, (uint64_t)1305130257814057U, (uint64_t)2126012765451197U,
-      (uint64_t)1441004402875101U, (uint64_t)353948968859203U, (uint64_t)470765987164835U,
-      (uint64_t)1507675957683570U, (uint64_t)1086650358745097U
+      557549315715710ULL, 196756086293855ULL, 846062225082495ULL, 1865068224838092ULL,
+      991112090754908ULL, 522916421512828ULL, 2098523346722375ULL, 1135633221747012ULL,
+      858420432114866ULL, 186358544306082ULL, 1044420411868480ULL, 2080052304349321ULL,
+      557301814716724ULL, 1305130257814057ULL, 2126012765451197ULL, 1441004402875101ULL,
+      353948968859203ULL, 470765987164835ULL, 1507675957683570ULL, 1086650358745097ULL
     };
   uint64_t
   q4[20U] =
     {
-      (uint64_t)1129953239743101U, (uint64_t)1240339163956160U, (uint64_t)61002583352401U,
-      (uint64_t)2017604552196030U, (uint64_t)1576867829229863U, (uint64_t)1508654942849389U,
-      (uint64_t)270111619664077U, (uint64_t)1253097517254054U, (uint64_t)721798270973250U,
-      (uint64_t)161923365415298U, (uint64_t)828530877526011U, (uint64_t)1494851059386763U,
-      (uint64_t)662034171193976U, (uint64_t)1315349646974670U, (uint64_t)2199229517308806U,
-      (uint64_t)497078277852673U, (uint64_t)1310507715989956U, (uint64_t)1881315714002105U,
-      (uint64_t)2214039404983803U, (uint64_t)1331036420272667U
+      1129953239743101ULL, 1240339163956160ULL, 61002583352401ULL, 2017604552196030ULL,
+      1576867829229863ULL, 1508654942849389ULL, 270111619664077ULL, 1253097517254054ULL,
+      721798270973250ULL, 161923365415298ULL, 828530877526011ULL, 1494851059386763ULL,
+      662034171193976ULL, 1315349646974670ULL, 2199229517308806ULL, 497078277852673ULL,
+      1310507715989956ULL, 1881315714002105ULL, 2214039404983803ULL, 1331036420272667ULL
     };
   uint64_t *r1 = bscalar;
-  uint64_t *r2 = bscalar + (uint32_t)1U;
-  uint64_t *r3 = bscalar + (uint32_t)2U;
-  uint64_t *r4 = bscalar + (uint32_t)3U;
+  uint64_t *r2 = bscalar + 1U;
+  uint64_t *r3 = bscalar + 2U;
+  uint64_t *r4 = bscalar + 3U;
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(out);
   uint64_t tmp[20U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
+    0U,
+    16U,
+    1U,
+    KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
+    uint32_t k = 64U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
-    uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
+    uint32_t k0 = 64U - 4U * i - 4U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
-    uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
+    uint32_t k1 = 64U - 4U * i - 4U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
-    uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
+    uint32_t k2 = 64U - 4U * i - 4U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp););
-  KRML_HOST_IGNORE(q2);
-  KRML_HOST_IGNORE(q3);
-  KRML_HOST_IGNORE(q4);
+  KRML_MAYBE_UNUSED_VAR(q2);
+  KRML_MAYBE_UNUSED_VAR(q3);
+  KRML_MAYBE_UNUSED_VAR(q4);
 }
 
 static inline void
@@ -1481,48 +1411,48 @@ point_mul_g_double_vartime(uint64_t *out, uint8_t *scalar1, uint8_t *scalar2, ui
 {
   uint64_t tmp[28U] = { 0U };
   uint64_t *g = tmp;
-  uint64_t *bscalar1 = tmp + (uint32_t)20U;
-  uint64_t *bscalar2 = tmp + (uint32_t)24U;
+  uint64_t *bscalar1 = tmp + 20U;
+  uint64_t *bscalar2 = tmp + 24U;
   uint64_t *gx = g;
-  uint64_t *gy = g + (uint32_t)5U;
-  uint64_t *gz = g + (uint32_t)10U;
-  uint64_t *gt = g + (uint32_t)15U;
-  gx[0U] = (uint64_t)0x00062d608f25d51aU;
-  gx[1U] = (uint64_t)0x000412a4b4f6592aU;
-  gx[2U] = (uint64_t)0x00075b7171a4b31dU;
-  gx[3U] = (uint64_t)0x0001ff60527118feU;
-  gx[4U] = (uint64_t)0x000216936d3cd6e5U;
-  gy[0U] = (uint64_t)0x0006666666666658U;
-  gy[1U] = (uint64_t)0x0004ccccccccccccU;
-  gy[2U] = (uint64_t)0x0001999999999999U;
-  gy[3U] = (uint64_t)0x0003333333333333U;
-  gy[4U] = (uint64_t)0x0006666666666666U;
-  gz[0U] = (uint64_t)1U;
-  gz[1U] = (uint64_t)0U;
-  gz[2U] = (uint64_t)0U;
-  gz[3U] = (uint64_t)0U;
-  gz[4U] = (uint64_t)0U;
-  gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
-  gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
-  gt[2U] = (uint64_t)0x0002af8df483c27eU;
-  gt[3U] = (uint64_t)0x000332b375274732U;
-  gt[4U] = (uint64_t)0x00067875f0fd78b7U;
+  uint64_t *gy = g + 5U;
+  uint64_t *gz = g + 10U;
+  uint64_t *gt = g + 15U;
+  gx[0U] = 0x00062d608f25d51aULL;
+  gx[1U] = 0x000412a4b4f6592aULL;
+  gx[2U] = 0x00075b7171a4b31dULL;
+  gx[3U] = 0x0001ff60527118feULL;
+  gx[4U] = 0x000216936d3cd6e5ULL;
+  gy[0U] = 0x0006666666666658ULL;
+  gy[1U] = 0x0004ccccccccccccULL;
+  gy[2U] = 0x0001999999999999ULL;
+  gy[3U] = 0x0003333333333333ULL;
+  gy[4U] = 0x0006666666666666ULL;
+  gz[0U] = 1ULL;
+  gz[1U] = 0ULL;
+  gz[2U] = 0ULL;
+  gz[3U] = 0ULL;
+  gz[4U] = 0ULL;
+  gt[0U] = 0x00068ab3a5b7dda3ULL;
+  gt[1U] = 0x00000eea2a5eadbbULL;
+  gt[2U] = 0x0002af8df483c27eULL;
+  gt[3U] = 0x000332b375274732ULL;
+  gt[4U] = 0x00067875f0fd78b7ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar1;
-    uint8_t *bj = scalar1 + i * (uint32_t)8U;
+    uint8_t *bj = scalar1 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar2;
-    uint8_t *bj = scalar2 + i * (uint32_t)8U;
+    uint8_t *bj = scalar2 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -1530,58 +1460,50 @@ point_mul_g_double_vartime(uint64_t *out, uint8_t *scalar1, uint8_t *scalar2, ui
   uint64_t table2[640U] = { 0U };
   uint64_t tmp1[20U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)20U;
+  uint64_t *t1 = table2 + 20U;
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(t0);
-  memcpy(t1, q2, (uint32_t)20U * sizeof (uint64_t));
+  memcpy(t1, q2, 20U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)20U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 20U;
     Hacl_Impl_Ed25519_PointDouble_point_double(tmp1, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U,
-      tmp1,
-      (uint32_t)20U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U;
+    memcpy(table2 + (2U * i + 2U) * 20U, tmp1, 20U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 20U;
     Hacl_Impl_Ed25519_PointAdd_point_add(tmp1, q2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U,
-      tmp1,
-      (uint32_t)20U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 20U, tmp1, 20U * sizeof (uint64_t)););
   uint64_t tmp10[20U] = { 0U };
-  uint32_t i0 = (uint32_t)255U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar1, i0, (uint32_t)5U);
+  uint32_t i0 = 255U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
   const
   uint64_t
-  *a_bits_l = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)20U;
-  memcpy(out, (uint64_t *)a_bits_l, (uint32_t)20U * sizeof (uint64_t));
-  uint32_t i1 = (uint32_t)255U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar2, i1, (uint32_t)5U);
+  *a_bits_l = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 20U;
+  memcpy(out, (uint64_t *)a_bits_l, 20U * sizeof (uint64_t));
+  uint32_t i1 = 255U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
-  const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)20U;
-  memcpy(tmp10, (uint64_t *)a_bits_l0, (uint32_t)20U * sizeof (uint64_t));
+  const uint64_t *a_bits_l0 = table2 + bits_l320 * 20U;
+  memcpy(tmp10, (uint64_t *)a_bits_l0, 20U * sizeof (uint64_t));
   Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp10);
   uint64_t tmp11[20U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++)
+  for (uint32_t i = 0U; i < 51U; i++)
   {
-    KRML_MAYBE_FOR5(i2,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar2, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
+    uint32_t k = 255U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar2, k, 5U);
     uint32_t bits_l321 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)20U;
-    memcpy(tmp11, (uint64_t *)a_bits_l1, (uint32_t)20U * sizeof (uint64_t));
+    const uint64_t *a_bits_l1 = table2 + bits_l321 * 20U;
+    memcpy(tmp11, (uint64_t *)a_bits_l1, 20U * sizeof (uint64_t));
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp11);
-    uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar1, k0, (uint32_t)5U);
+    uint32_t k0 = 255U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar1, k0, 5U);
     uint32_t bits_l322 = (uint32_t)bits_l0;
     const
     uint64_t
-    *a_bits_l2 = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)20U;
-    memcpy(tmp11, (uint64_t *)a_bits_l2, (uint32_t)20U * sizeof (uint64_t));
+    *a_bits_l2 = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 20U;
+    memcpy(tmp11, (uint64_t *)a_bits_l2, 20U * sizeof (uint64_t));
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp11);
   }
 }
@@ -1609,13 +1531,13 @@ static inline void store_56(uint8_t *out, uint64_t *b)
   uint32_t b4_ = (uint32_t)b4;
   uint8_t *b8 = out;
   store64_le(b8, b0);
-  uint8_t *b80 = out + (uint32_t)7U;
+  uint8_t *b80 = out + 7U;
   store64_le(b80, b1);
-  uint8_t *b81 = out + (uint32_t)14U;
+  uint8_t *b81 = out + 14U;
   store64_le(b81, b2);
-  uint8_t *b82 = out + (uint32_t)21U;
+  uint8_t *b82 = out + 21U;
   store64_le(b82, b3);
-  store32_le(out + (uint32_t)28U, b4_);
+  store32_le(out + 28U, b4_);
 }
 
 static inline void load_64_bytes(uint64_t *out, uint8_t *b)
@@ -1623,39 +1545,39 @@ static inline void load_64_bytes(uint64_t *out, uint8_t *b)
   uint8_t *b80 = b;
   uint64_t u = load64_le(b80);
   uint64_t z = u;
-  uint64_t b0 = z & (uint64_t)0xffffffffffffffU;
-  uint8_t *b81 = b + (uint32_t)7U;
+  uint64_t b0 = z & 0xffffffffffffffULL;
+  uint8_t *b81 = b + 7U;
   uint64_t u0 = load64_le(b81);
   uint64_t z0 = u0;
-  uint64_t b1 = z0 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b82 = b + (uint32_t)14U;
+  uint64_t b1 = z0 & 0xffffffffffffffULL;
+  uint8_t *b82 = b + 14U;
   uint64_t u1 = load64_le(b82);
   uint64_t z1 = u1;
-  uint64_t b2 = z1 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b83 = b + (uint32_t)21U;
+  uint64_t b2 = z1 & 0xffffffffffffffULL;
+  uint8_t *b83 = b + 21U;
   uint64_t u2 = load64_le(b83);
   uint64_t z2 = u2;
-  uint64_t b3 = z2 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b84 = b + (uint32_t)28U;
+  uint64_t b3 = z2 & 0xffffffffffffffULL;
+  uint8_t *b84 = b + 28U;
   uint64_t u3 = load64_le(b84);
   uint64_t z3 = u3;
-  uint64_t b4 = z3 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b85 = b + (uint32_t)35U;
+  uint64_t b4 = z3 & 0xffffffffffffffULL;
+  uint8_t *b85 = b + 35U;
   uint64_t u4 = load64_le(b85);
   uint64_t z4 = u4;
-  uint64_t b5 = z4 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b86 = b + (uint32_t)42U;
+  uint64_t b5 = z4 & 0xffffffffffffffULL;
+  uint8_t *b86 = b + 42U;
   uint64_t u5 = load64_le(b86);
   uint64_t z5 = u5;
-  uint64_t b6 = z5 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b87 = b + (uint32_t)49U;
+  uint64_t b6 = z5 & 0xffffffffffffffULL;
+  uint8_t *b87 = b + 49U;
   uint64_t u6 = load64_le(b87);
   uint64_t z6 = u6;
-  uint64_t b7 = z6 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b8 = b + (uint32_t)56U;
+  uint64_t b7 = z6 & 0xffffffffffffffULL;
+  uint8_t *b8 = b + 56U;
   uint64_t u7 = load64_le(b8);
   uint64_t z7 = u7;
-  uint64_t b88 = z7 & (uint64_t)0xffffffffffffffU;
+  uint64_t b88 = z7 & 0xffffffffffffffULL;
   uint8_t b63 = b[63U];
   uint64_t b9 = (uint64_t)b63;
   out[0U] = b0;
@@ -1675,20 +1597,20 @@ static inline void load_32_bytes(uint64_t *out, uint8_t *b)
   uint8_t *b80 = b;
   uint64_t u0 = load64_le(b80);
   uint64_t z = u0;
-  uint64_t b0 = z & (uint64_t)0xffffffffffffffU;
-  uint8_t *b81 = b + (uint32_t)7U;
+  uint64_t b0 = z & 0xffffffffffffffULL;
+  uint8_t *b81 = b + 7U;
   uint64_t u1 = load64_le(b81);
   uint64_t z0 = u1;
-  uint64_t b1 = z0 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b82 = b + (uint32_t)14U;
+  uint64_t b1 = z0 & 0xffffffffffffffULL;
+  uint8_t *b82 = b + 14U;
   uint64_t u2 = load64_le(b82);
   uint64_t z1 = u2;
-  uint64_t b2 = z1 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b8 = b + (uint32_t)21U;
+  uint64_t b2 = z1 & 0xffffffffffffffULL;
+  uint8_t *b8 = b + 21U;
   uint64_t u3 = load64_le(b8);
   uint64_t z2 = u3;
-  uint64_t b3 = z2 & (uint64_t)0xffffffffffffffU;
-  uint32_t u = load32_le(b + (uint32_t)28U);
+  uint64_t b3 = z2 & 0xffffffffffffffULL;
+  uint32_t u = load32_le(b + 28U);
   uint32_t b4 = u;
   uint64_t b41 = (uint64_t)b4;
   out[0U] = b0;
@@ -1703,15 +1625,14 @@ static inline void sha512_pre_msg(uint8_t *hash, uint8_t *prefix, uint32_t len,
   uint8_t buf[128U] = { 0U };
   uint64_t block_state[8U] = { 0U };
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64 p = s;
   Hacl_SHA2_Scalar32_sha512_init(block_state);
   Hacl_Streaming_MD_state_64 *st = &p;
-  Hacl_Streaming_Types_error_code
-  err0 = Hacl_Streaming_SHA2_update_512(st, prefix, (uint32_t)32U);
+  Hacl_Streaming_Types_error_code err0 = Hacl_Streaming_SHA2_update_512(st, prefix, 32U);
   Hacl_Streaming_Types_error_code err1 = Hacl_Streaming_SHA2_update_512(st, input, len);
-  KRML_HOST_IGNORE(err0);
-  KRML_HOST_IGNORE(err1);
+  KRML_MAYBE_UNUSED_VAR(err0);
+  KRML_MAYBE_UNUSED_VAR(err1);
   Hacl_Streaming_SHA2_finish_512(st, hash);
 }
 
@@ -1727,18 +1648,16 @@ sha512_pre_pre2_msg(
   uint8_t buf[128U] = { 0U };
   uint64_t block_state[8U] = { 0U };
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64 p = s;
   Hacl_SHA2_Scalar32_sha512_init(block_state);
   Hacl_Streaming_MD_state_64 *st = &p;
-  Hacl_Streaming_Types_error_code
-  err0 = Hacl_Streaming_SHA2_update_512(st, prefix, (uint32_t)32U);
-  Hacl_Streaming_Types_error_code
-  err1 = Hacl_Streaming_SHA2_update_512(st, prefix2, (uint32_t)32U);
+  Hacl_Streaming_Types_error_code err0 = Hacl_Streaming_SHA2_update_512(st, prefix, 32U);
+  Hacl_Streaming_Types_error_code err1 = Hacl_Streaming_SHA2_update_512(st, prefix2, 32U);
   Hacl_Streaming_Types_error_code err2 = Hacl_Streaming_SHA2_update_512(st, input, len);
-  KRML_HOST_IGNORE(err0);
-  KRML_HOST_IGNORE(err1);
-  KRML_HOST_IGNORE(err2);
+  KRML_MAYBE_UNUSED_VAR(err0);
+  KRML_MAYBE_UNUSED_VAR(err1);
+  KRML_MAYBE_UNUSED_VAR(err2);
   Hacl_Streaming_SHA2_finish_512(st, hash);
 }
 
@@ -1777,12 +1696,12 @@ static inline void point_mul_g_compress(uint8_t *out, uint8_t *s)
 
 static inline void secret_expand(uint8_t *expanded, uint8_t *secret)
 {
-  Hacl_Streaming_SHA2_hash_512(secret, (uint32_t)32U, expanded);
+  Hacl_Streaming_SHA2_hash_512(secret, 32U, expanded);
   uint8_t *h_low = expanded;
   uint8_t h_low0 = h_low[0U];
   uint8_t h_low31 = h_low[31U];
-  h_low[0U] = h_low0 & (uint8_t)0xf8U;
-  h_low[31U] = (h_low31 & (uint8_t)127U) | (uint8_t)64U;
+  h_low[0U] = (uint32_t)h_low0 & 0xf8U;
+  h_low[31U] = ((uint32_t)h_low31 & 127U) | 64U;
 }
 
 /********************************************************************************
@@ -1816,8 +1735,8 @@ Compute the expanded keys for an Ed25519 signature.
 void Hacl_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key)
 {
   uint8_t *public_key = expanded_keys;
-  uint8_t *s_prefix = expanded_keys + (uint32_t)32U;
-  uint8_t *s = expanded_keys + (uint32_t)32U;
+  uint8_t *s_prefix = expanded_keys + 32U;
+  uint8_t *s = expanded_keys + 32U;
   secret_expand(s_prefix, private_key);
   point_mul_g_compress(public_key, s);
 }
@@ -1843,13 +1762,13 @@ Hacl_Ed25519_sign_expanded(
 )
 {
   uint8_t *rs = signature;
-  uint8_t *ss = signature + (uint32_t)32U;
+  uint8_t *ss = signature + 32U;
   uint64_t rq[5U] = { 0U };
   uint64_t hq[5U] = { 0U };
   uint8_t rb[32U] = { 0U };
   uint8_t *public_key = expanded_keys;
-  uint8_t *s = expanded_keys + (uint32_t)32U;
-  uint8_t *prefix = expanded_keys + (uint32_t)64U;
+  uint8_t *s = expanded_keys + 32U;
+  uint8_t *prefix = expanded_keys + 64U;
   sha512_modq_pre(rq, prefix, msg_len, msg);
   store_56(rb, rq);
   point_mul_g_compress(rs, rb);
@@ -1904,7 +1823,7 @@ Hacl_Ed25519_verify(uint8_t *public_key, uint32_t msg_len, uint8_t *msg, uint8_t
     {
       uint8_t hb[32U] = { 0U };
       uint8_t *rs1 = signature;
-      uint8_t *sb = signature + (uint32_t)32U;
+      uint8_t *sb = signature + 32U;
       uint64_t tmp[5U] = { 0U };
       load_32_bytes(tmp, sb);
       bool b1 = gte_q(tmp);
diff --git a/src/Hacl_FFDHE.c b/src/Hacl_FFDHE.c
index 9cf2ddfb..098aa607 100644
--- a/src/Hacl_FFDHE.c
+++ b/src/Hacl_FFDHE.c
@@ -35,23 +35,23 @@ static inline uint32_t ffdhe_len(Spec_FFDHE_ffdhe_alg a)
   {
     case Spec_FFDHE_FFDHE2048:
       {
-        return (uint32_t)256U;
+        return 256U;
       }
     case Spec_FFDHE_FFDHE3072:
       {
-        return (uint32_t)384U;
+        return 384U;
       }
     case Spec_FFDHE_FFDHE4096:
       {
-        return (uint32_t)512U;
+        return 512U;
       }
     case Spec_FFDHE_FFDHE6144:
       {
-        return (uint32_t)768U;
+        return 768U;
       }
     case Spec_FFDHE_FFDHE8192:
       {
-        return (uint32_t)1024U;
+        return 1024U;
       }
     default:
       {
@@ -63,12 +63,46 @@ static inline uint32_t ffdhe_len(Spec_FFDHE_ffdhe_alg a)
 
 static inline void ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a, uint64_t *p_r2_n)
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   uint64_t *p_n = p_r2_n;
   uint64_t *r2_n = p_r2_n + nLen;
-  KRML_CHECK_SIZE(sizeof (uint8_t), ffdhe_len(a));
-  uint8_t p_s[ffdhe_len(a)];
-  memset(p_s, 0U, ffdhe_len(a) * sizeof (uint8_t));
+  uint32_t sw;
+  switch (a)
+  {
+    case Spec_FFDHE_FFDHE2048:
+      {
+        sw = 256U;
+        break;
+      }
+    case Spec_FFDHE_FFDHE3072:
+      {
+        sw = 384U;
+        break;
+      }
+    case Spec_FFDHE_FFDHE4096:
+      {
+        sw = 512U;
+        break;
+      }
+    case Spec_FFDHE_FFDHE6144:
+      {
+        sw = 768U;
+        break;
+      }
+    case Spec_FFDHE_FFDHE8192:
+      {
+        sw = 1024U;
+        break;
+      }
+    default:
+      {
+        KRML_HOST_EPRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__);
+        KRML_HOST_EXIT(253U);
+      }
+  }
+  KRML_CHECK_SIZE(sizeof (uint8_t), sw);
+  uint8_t p_s[sw];
+  memset(p_s, 0U, sw * sizeof (uint8_t));
   const uint8_t *p;
   switch (a)
   {
@@ -104,88 +138,80 @@ static inline void ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a, uint64_t *p_r2_n)
       }
   }
   uint32_t len = ffdhe_len(a);
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint8_t *os = p_s;
     uint8_t x = p[i];
     os[i] = x;
   }
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ffdhe_len(a), p_s, p_n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((ffdhe_len(a) - (uint32_t)1U)
-    / (uint32_t)8U
-    + (uint32_t)1U,
-    (uint32_t)8U * ffdhe_len(a) - (uint32_t)1U,
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((ffdhe_len(a) - 1U) / 8U + 1U,
+    8U * ffdhe_len(a) - 1U,
     p_n,
     r2_n);
 }
 
 static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, uint64_t *p_n)
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t p_n1[nLen];
   memset(p_n1, 0U, nLen * sizeof (uint64_t));
-  uint64_t
-  c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, p_n[0U], (uint64_t)1U, p_n1);
-  if ((uint32_t)1U < nLen)
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, p_n[0U], 1ULL, p_n1);
+  if (1U < nLen)
   {
-    uint64_t *a1 = p_n + (uint32_t)1U;
-    uint64_t *res1 = p_n1 + (uint32_t)1U;
+    uint64_t *a1 = p_n + 1U;
+    uint64_t *res1 = p_n1 + 1U;
     uint64_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (nLen - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (nLen - 1U) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
     }
-    for
-    (uint32_t
-      i = (nLen - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < nLen - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (nLen - 1U) / 4U * 4U; i < nLen - 1U; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c1 = c;
-    KRML_HOST_IGNORE(c1);
+    KRML_MAYBE_UNUSED_VAR(c1);
   }
   else
   {
-    KRML_HOST_IGNORE(c0);
+    KRML_MAYBE_UNUSED_VAR(c0);
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t b2[nLen];
   memset(b2, 0U, nLen * sizeof (uint64_t));
-  uint32_t i0 = (uint32_t)0U;
-  uint32_t j = (uint32_t)0U;
-  b2[i0] = b2[i0] | (uint64_t)1U << j;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < nLen; i++)
+  uint32_t i0 = 0U;
+  uint32_t j = 0U;
+  b2[i0] = b2[i0] | 1ULL << j;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < nLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(b2[i], pk_n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], pk_n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t res = acc0;
   uint64_t m0 = res;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < nLen; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < nLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(pk_n[i], p_n1[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(pk_n[i], p_n1[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   return m0 & m1;
@@ -200,21 +226,19 @@ ffdhe_compute_exp(
   uint8_t *res
 )
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   uint64_t *p_n = p_r2_n;
   uint64_t *r2_n = p_r2_n + nLen;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t res_n[nLen];
   memset(res_n, 0U, nLen * sizeof (uint64_t));
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(p_n[0U]);
-  Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((ffdhe_len(a) - (uint32_t)1U)
-    / (uint32_t)8U
-    + (uint32_t)1U,
+  Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((ffdhe_len(a) - 1U) / 8U + 1U,
     p_n,
     mu,
     r2_n,
     b_n,
-    (uint32_t)64U * nLen,
+    64U * nLen,
     sk_n,
     res_n);
   Hacl_Bignum_Convert_bn_to_bytes_be_uint64(ffdhe_len(a), res_n, res);
@@ -227,7 +251,7 @@ uint32_t Hacl_FFDHE_ffdhe_len(Spec_FFDHE_ffdhe_alg a)
 
 uint64_t *Hacl_FFDHE_new_ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a)
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen);
   uint64_t *res = (uint64_t *)KRML_HOST_CALLOC(nLen + nLen, sizeof (uint64_t));
   if (res == NULL)
@@ -249,17 +273,17 @@ Hacl_FFDHE_ffdhe_secret_to_public_precomp(
 )
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t g_n[nLen];
   memset(g_n, 0U, nLen * sizeof (uint64_t));
-  uint8_t g = (uint8_t)0U;
+  uint8_t g = 0U;
   {
     uint8_t *os = &g;
     uint8_t x = Hacl_Impl_FFDHE_Constants_ffdhe_g2[0U];
     os[0U] = x;
   }
-  Hacl_Bignum_Convert_bn_from_bytes_be_uint64((uint32_t)1U, &g, g_n);
+  Hacl_Bignum_Convert_bn_from_bytes_be_uint64(1U, &g, g_n);
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t sk_n[nLen];
   memset(sk_n, 0U, nLen * sizeof (uint64_t));
@@ -270,7 +294,7 @@ Hacl_FFDHE_ffdhe_secret_to_public_precomp(
 void Hacl_FFDHE_ffdhe_secret_to_public(Spec_FFDHE_ffdhe_alg a, uint8_t *sk, uint8_t *pk)
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen);
   uint64_t p_r2_n[nLen + nLen];
   memset(p_r2_n, 0U, (nLen + nLen) * sizeof (uint64_t));
@@ -288,7 +312,7 @@ Hacl_FFDHE_ffdhe_shared_secret_precomp(
 )
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   uint64_t *p_n = p_r2_n;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t sk_n[nLen];
@@ -299,7 +323,7 @@ Hacl_FFDHE_ffdhe_shared_secret_precomp(
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(len, sk, sk_n);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(len, pk, pk_n);
   uint64_t m = ffdhe_check_pk(a, pk_n, p_n);
-  if (m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  if (m == 0xFFFFFFFFFFFFFFFFULL)
   {
     ffdhe_compute_exp(a, p_r2_n, sk_n, pk_n, ss);
   }
@@ -310,7 +334,7 @@ uint64_t
 Hacl_FFDHE_ffdhe_shared_secret(Spec_FFDHE_ffdhe_alg a, uint8_t *sk, uint8_t *pk, uint8_t *ss)
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen);
   uint64_t p_n[nLen + nLen];
   memset(p_n, 0U, (nLen + nLen) * sizeof (uint64_t));
diff --git a/src/Hacl_Frodo1344.c b/src/Hacl_Frodo1344.c
index 0696f34c..cbba507b 100644
--- a/src/Hacl_Frodo1344.c
+++ b/src/Hacl_Frodo1344.c
@@ -29,151 +29,113 @@
 #include "internal/Hacl_Frodo_KEM.h"
 #include "lib_memzero0.h"
 
-uint32_t Hacl_Frodo1344_crypto_bytes = (uint32_t)32U;
+uint32_t Hacl_Frodo1344_crypto_bytes = 32U;
 
-uint32_t Hacl_Frodo1344_crypto_publickeybytes = (uint32_t)21520U;
+uint32_t Hacl_Frodo1344_crypto_publickeybytes = 21520U;
 
-uint32_t Hacl_Frodo1344_crypto_secretkeybytes = (uint32_t)43088U;
+uint32_t Hacl_Frodo1344_crypto_secretkeybytes = 43088U;
 
-uint32_t Hacl_Frodo1344_crypto_ciphertextbytes = (uint32_t)21632U;
+uint32_t Hacl_Frodo1344_crypto_ciphertextbytes = 21632U;
 
 uint32_t Hacl_Frodo1344_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[80U] = { 0U };
-  randombytes_((uint32_t)80U, coins);
+  randombytes_(80U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)32U;
-  uint8_t *z = coins + (uint32_t)64U;
+  uint8_t *seed_se = coins + 32U;
+  uint8_t *z = coins + 64U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake256_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)21552U;
+  Hacl_SHA3_shake256_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 21552U;
   uint16_t s_matrix[10752U] = { 0U };
   uint16_t e_matrix[10752U] = { 0U };
   uint8_t r[43008U] = { 0U };
   uint8_t shake_input_seed_se[33U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43008U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)1344U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)1344U,
-    (uint32_t)8U,
-    r + (uint32_t)21504U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(33U, shake_input_seed_se, 43008U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(1344U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(1344U, 8U, r + 21504U, e_matrix);
   uint16_t b_matrix[10752U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)1806336U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 1806336U);
   uint16_t a_matrix[1806336U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)1344U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)1344U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)1344U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)1344U,
-    (uint32_t)8U,
-    (uint32_t)16U,
-    b_matrix,
-    b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)1344U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)10752U, uint16_t);
-  uint32_t slen1 = (uint32_t)43056U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 1344U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(1344U, 1344U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(1344U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(1344U, 8U, 16U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(1344U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 10752U, uint16_t);
+  uint32_t slen1 = 43056U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)32U, pk, (uint32_t)21520U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)21520U, pk, (uint32_t)32U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)80U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 32U * sizeof (uint8_t));
+  memcpy(sk_p + 32U, pk, 21520U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(21520U, pk, 32U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 80U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo1344_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[32U] = { 0U };
-  randombytes_((uint32_t)32U, coins);
+  randombytes_(32U, coins);
   uint8_t seed_se_k[64U] = { 0U };
   uint8_t pkh_mu[64U] = { 0U };
-  Hacl_SHA3_shake256_hacl((uint32_t)21520U, pk, (uint32_t)32U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)32U, coins, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)64U, pkh_mu, (uint32_t)64U, seed_se_k);
+  Hacl_SHA3_shake256_hacl(21520U, pk, 32U, pkh_mu);
+  memcpy(pkh_mu + 32U, coins, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(64U, pkh_mu, 64U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)32U;
+  uint8_t *k = seed_se_k + 32U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[10752U] = { 0U };
   uint16_t ep_matrix[10752U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[43136U] = { 0U };
   uint8_t shake_input_seed_se[33U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43136U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, (uint32_t)1344U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)1344U,
-    r + (uint32_t)21504U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)43008U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(33U, shake_input_seed_se, 43136U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r + 21504U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 8U, r + 43008U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)21504U;
+  uint8_t *c2 = ct + 21504U;
   uint16_t bp_matrix[10752U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)1806336U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 1806336U);
   uint16_t a_matrix[1806336U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)1344U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)1344U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)1344U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 1344U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 1344U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 1344U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 1344U, 16U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[10752U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)1344U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(1344U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)4U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)21664U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 4U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 16U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 21664U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t shake_input_ss[ss_init_len];
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)21632U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)21632U, k, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, (uint32_t)32U, ss);
+  memcpy(shake_input_ss, ct, 21632U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 21632U, k, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, 32U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)64U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)32U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t);
+  Lib_Memzero0_memzero(coins, 32U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -181,39 +143,30 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[10752U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)21504U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, c2, c_matrix);
+  uint8_t *c2 = ct + 21504U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 1344U, 16U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 16U, c2, c_matrix);
   uint8_t mu_decode[32U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)21552U;
+  uint8_t *s_bytes = sk + 21552U;
   uint16_t s_matrix[10752U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)1344U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)16U,
-    (uint32_t)4U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(1344U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 1344U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(16U, 4U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[64U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)64U;
+  uint32_t pkh_mu_decode_len = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t pkh_mu_decode[pkh_mu_decode_len];
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)43056U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)32U, mu_decode, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)64U, seed_se_k);
+  uint8_t *pkh = sk + 43056U;
+  memcpy(pkh_mu_decode, pkh, 32U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 32U, mu_decode, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, 64U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)32U;
+  uint8_t *kp = seed_se_k + 32U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[10752U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -222,80 +175,58 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[43136U] = { 0U };
   uint8_t shake_input_seed_se[33U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43136U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, (uint32_t)1344U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)1344U,
-    r + (uint32_t)21504U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)43008U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)32U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(33U, shake_input_seed_se, 43136U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r + 21504U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 8U, r + 43008U, epp_matrix);
+  uint8_t *pk = sk + 32U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)1806336U);
+  uint8_t *b = pk + 16U;
+  KRML_CHECK_SIZE(sizeof (uint16_t), 1806336U);
   uint16_t a_matrix[1806336U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)1344U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)1344U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)1344U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 1344U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 1344U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 1344U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[10752U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)1344U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(1344U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)4U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)1344U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 4U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 1344U, 16U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 16U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 1344U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[32U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;
   }
-  uint32_t ss_init_len = (uint32_t)21664U;
+  uint32_t ss_init_len = 21664U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t ss_init[ss_init_len];
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)21632U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)21632U, kp_s, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, (uint32_t)32U, ss);
+  memcpy(ss_init, ct, 21632U * sizeof (uint8_t));
+  memcpy(ss_init + 21632U, kp_s, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, 32U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)64U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)32U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 32U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 32U, uint8_t);
+  return 0U;
 }
 
diff --git a/src/Hacl_Frodo64.c b/src/Hacl_Frodo64.c
index 575390e3..dcca214d 100644
--- a/src/Hacl_Frodo64.c
+++ b/src/Hacl_Frodo64.c
@@ -34,145 +34,111 @@
  */
 
 
-uint32_t Hacl_Frodo64_crypto_bytes = (uint32_t)16U;
+uint32_t Hacl_Frodo64_crypto_bytes = 16U;
 
-uint32_t Hacl_Frodo64_crypto_publickeybytes = (uint32_t)976U;
+uint32_t Hacl_Frodo64_crypto_publickeybytes = 976U;
 
-uint32_t Hacl_Frodo64_crypto_secretkeybytes = (uint32_t)2032U;
+uint32_t Hacl_Frodo64_crypto_secretkeybytes = 2032U;
 
-uint32_t Hacl_Frodo64_crypto_ciphertextbytes = (uint32_t)1080U;
+uint32_t Hacl_Frodo64_crypto_ciphertextbytes = 1080U;
 
 uint32_t Hacl_Frodo64_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[48U] = { 0U };
-  randombytes_((uint32_t)48U, coins);
+  randombytes_(48U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)16U;
-  uint8_t *z = coins + (uint32_t)32U;
+  uint8_t *seed_se = coins + 16U;
+  uint8_t *z = coins + 32U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake128_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)992U;
+  Hacl_SHA3_shake128_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 992U;
   uint16_t s_matrix[512U] = { 0U };
   uint16_t e_matrix[512U] = { 0U };
   uint8_t r[2048U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2048U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)64U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)64U,
-    (uint32_t)8U,
-    r + (uint32_t)1024U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(17U, shake_input_seed_se, 2048U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(64U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(64U, 8U, r + 1024U, e_matrix);
   uint16_t b_matrix[512U] = { 0U };
   uint16_t a_matrix[4096U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)64U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)64U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)64U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b_matrix, b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)64U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)512U, uint16_t);
-  uint32_t slen1 = (uint32_t)2016U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 64U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(64U, 64U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(64U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(64U, 8U, 15U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(64U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 512U, uint16_t);
+  uint32_t slen1 = 2016U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)16U, pk, (uint32_t)976U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)976U, pk, (uint32_t)16U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)48U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 16U * sizeof (uint8_t));
+  memcpy(sk_p + 16U, pk, 976U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(976U, pk, 16U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 48U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo64_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[16U] = { 0U };
-  randombytes_((uint32_t)16U, coins);
+  randombytes_(16U, coins);
   uint8_t seed_se_k[32U] = { 0U };
   uint8_t pkh_mu[32U] = { 0U };
-  Hacl_SHA3_shake128_hacl((uint32_t)976U, pk, (uint32_t)16U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)16U, coins, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)32U, pkh_mu, (uint32_t)32U, seed_se_k);
+  Hacl_SHA3_shake128_hacl(976U, pk, 16U, pkh_mu);
+  memcpy(pkh_mu + 16U, coins, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(32U, pkh_mu, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)16U;
+  uint8_t *k = seed_se_k + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[512U] = { 0U };
   uint16_t ep_matrix[512U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[2176U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2176U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, (uint32_t)64U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)64U,
-    r + (uint32_t)1024U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)2048U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(17U, shake_input_seed_se, 2176U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r + 1024U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 8U, r + 2048U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)960U;
+  uint8_t *c2 = ct + 960U;
   uint16_t bp_matrix[512U] = { 0U };
   uint16_t a_matrix[4096U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)64U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)64U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)64U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 64U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 64U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 64U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 64U, 15U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[512U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(64U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)1096U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 15U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 1096U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t shake_input_ss[ss_init_len];
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)1080U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)1080U, k, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, (uint32_t)16U, ss);
+  memcpy(shake_input_ss, ct, 1080U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 1080U, k, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, 16U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(coins, 16U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -180,39 +146,30 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[512U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)960U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, c2, c_matrix);
+  uint8_t *c2 = ct + 960U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 64U, 15U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 15U, c2, c_matrix);
   uint8_t mu_decode[16U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)992U;
+  uint8_t *s_bytes = sk + 992U;
   uint16_t s_matrix[512U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)64U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(64U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 64U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(15U, 2U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[32U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)32U;
+  uint32_t pkh_mu_decode_len = 32U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t pkh_mu_decode[pkh_mu_decode_len];
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)2016U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)16U, mu_decode, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)32U, seed_se_k);
+  uint8_t *pkh = sk + 2016U;
+  memcpy(pkh_mu_decode, pkh, 16U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 16U, mu_decode, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)16U;
+  uint8_t *kp = seed_se_k + 16U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[512U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -221,80 +178,58 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[2176U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2176U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, (uint32_t)64U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)64U,
-    r + (uint32_t)1024U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)2048U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)16U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(17U, shake_input_seed_se, 2176U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r + 1024U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 8U, r + 2048U, epp_matrix);
+  uint8_t *pk = sk + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t a_matrix[4096U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)64U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)64U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)64U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 64U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 64U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 64U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[512U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(64U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)64U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 64U, 15U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 15U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 64U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;);
-  uint32_t ss_init_len = (uint32_t)1096U;
+  uint32_t ss_init_len = 1096U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t ss_init[ss_init_len];
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)1080U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)1080U, kp_s, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, (uint32_t)16U, ss);
+  memcpy(ss_init, ct, 1080U * sizeof (uint8_t));
+  memcpy(ss_init + 1080U, kp_s, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, 16U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)16U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 16U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 16U, uint8_t);
+  return 0U;
 }
 
diff --git a/src/Hacl_Frodo640.c b/src/Hacl_Frodo640.c
index 54af36d8..c3c0d904 100644
--- a/src/Hacl_Frodo640.c
+++ b/src/Hacl_Frodo640.c
@@ -29,151 +29,113 @@
 #include "internal/Hacl_Frodo_KEM.h"
 #include "lib_memzero0.h"
 
-uint32_t Hacl_Frodo640_crypto_bytes = (uint32_t)16U;
+uint32_t Hacl_Frodo640_crypto_bytes = 16U;
 
-uint32_t Hacl_Frodo640_crypto_publickeybytes = (uint32_t)9616U;
+uint32_t Hacl_Frodo640_crypto_publickeybytes = 9616U;
 
-uint32_t Hacl_Frodo640_crypto_secretkeybytes = (uint32_t)19888U;
+uint32_t Hacl_Frodo640_crypto_secretkeybytes = 19888U;
 
-uint32_t Hacl_Frodo640_crypto_ciphertextbytes = (uint32_t)9720U;
+uint32_t Hacl_Frodo640_crypto_ciphertextbytes = 9720U;
 
 uint32_t Hacl_Frodo640_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[48U] = { 0U };
-  randombytes_((uint32_t)48U, coins);
+  randombytes_(48U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)16U;
-  uint8_t *z = coins + (uint32_t)32U;
+  uint8_t *seed_se = coins + 16U;
+  uint8_t *z = coins + 32U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake128_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)9632U;
+  Hacl_SHA3_shake128_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 9632U;
   uint16_t s_matrix[5120U] = { 0U };
   uint16_t e_matrix[5120U] = { 0U };
   uint8_t r[20480U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20480U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)640U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)640U,
-    (uint32_t)8U,
-    r + (uint32_t)10240U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(17U, shake_input_seed_se, 20480U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(640U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(640U, 8U, r + 10240U, e_matrix);
   uint16_t b_matrix[5120U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)409600U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 409600U);
   uint16_t a_matrix[409600U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)640U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)640U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)640U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)640U,
-    (uint32_t)8U,
-    (uint32_t)15U,
-    b_matrix,
-    b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)640U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)5120U, uint16_t);
-  uint32_t slen1 = (uint32_t)19872U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 640U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(640U, 640U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(640U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(640U, 8U, 15U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(640U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 5120U, uint16_t);
+  uint32_t slen1 = 19872U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)16U, pk, (uint32_t)9616U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)9616U, pk, (uint32_t)16U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)48U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 16U * sizeof (uint8_t));
+  memcpy(sk_p + 16U, pk, 9616U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(9616U, pk, 16U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 48U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo640_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[16U] = { 0U };
-  randombytes_((uint32_t)16U, coins);
+  randombytes_(16U, coins);
   uint8_t seed_se_k[32U] = { 0U };
   uint8_t pkh_mu[32U] = { 0U };
-  Hacl_SHA3_shake128_hacl((uint32_t)9616U, pk, (uint32_t)16U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)16U, coins, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)32U, pkh_mu, (uint32_t)32U, seed_se_k);
+  Hacl_SHA3_shake128_hacl(9616U, pk, 16U, pkh_mu);
+  memcpy(pkh_mu + 16U, coins, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(32U, pkh_mu, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)16U;
+  uint8_t *k = seed_se_k + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[5120U] = { 0U };
   uint16_t ep_matrix[5120U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[20608U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20608U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, (uint32_t)640U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)640U,
-    r + (uint32_t)10240U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)20480U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(17U, shake_input_seed_se, 20608U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r + 10240U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 8U, r + 20480U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)9600U;
+  uint8_t *c2 = ct + 9600U;
   uint16_t bp_matrix[5120U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)409600U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 409600U);
   uint16_t a_matrix[409600U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)640U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)640U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)640U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 640U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 640U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 640U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 640U, 15U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[5120U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)640U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(640U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)9736U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 15U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 9736U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t shake_input_ss[ss_init_len];
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)9720U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)9720U, k, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, (uint32_t)16U, ss);
+  memcpy(shake_input_ss, ct, 9720U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 9720U, k, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, 16U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(coins, 16U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -181,39 +143,30 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[5120U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)9600U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, c2, c_matrix);
+  uint8_t *c2 = ct + 9600U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 640U, 15U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 15U, c2, c_matrix);
   uint8_t mu_decode[16U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)9632U;
+  uint8_t *s_bytes = sk + 9632U;
   uint16_t s_matrix[5120U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)640U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(640U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 640U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(15U, 2U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[32U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)32U;
+  uint32_t pkh_mu_decode_len = 32U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t pkh_mu_decode[pkh_mu_decode_len];
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)19872U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)16U, mu_decode, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)32U, seed_se_k);
+  uint8_t *pkh = sk + 19872U;
+  memcpy(pkh_mu_decode, pkh, 16U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 16U, mu_decode, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)16U;
+  uint8_t *kp = seed_se_k + 16U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[5120U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -222,81 +175,59 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[20608U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20608U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, (uint32_t)640U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)640U,
-    r + (uint32_t)10240U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)20480U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)16U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(17U, shake_input_seed_se, 20608U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r + 10240U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 8U, r + 20480U, epp_matrix);
+  uint8_t *pk = sk + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)409600U);
+  uint8_t *b = pk + 16U;
+  KRML_CHECK_SIZE(sizeof (uint16_t), 409600U);
   uint16_t a_matrix[409600U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)640U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)640U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)640U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 640U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 640U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 640U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[5120U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)640U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(640U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)640U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 640U, 15U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 15U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 640U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;);
-  uint32_t ss_init_len = (uint32_t)9736U;
+  uint32_t ss_init_len = 9736U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t ss_init[ss_init_len];
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)9720U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)9720U, kp_s, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, (uint32_t)16U, ss);
+  memcpy(ss_init, ct, 9720U * sizeof (uint8_t));
+  memcpy(ss_init + 9720U, kp_s, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, 16U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)16U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 16U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 16U, uint8_t);
+  return 0U;
 }
 
diff --git a/src/Hacl_Frodo976.c b/src/Hacl_Frodo976.c
index 2e6aa6f0..617fc301 100644
--- a/src/Hacl_Frodo976.c
+++ b/src/Hacl_Frodo976.c
@@ -29,151 +29,113 @@
 #include "internal/Hacl_Frodo_KEM.h"
 #include "lib_memzero0.h"
 
-uint32_t Hacl_Frodo976_crypto_bytes = (uint32_t)24U;
+uint32_t Hacl_Frodo976_crypto_bytes = 24U;
 
-uint32_t Hacl_Frodo976_crypto_publickeybytes = (uint32_t)15632U;
+uint32_t Hacl_Frodo976_crypto_publickeybytes = 15632U;
 
-uint32_t Hacl_Frodo976_crypto_secretkeybytes = (uint32_t)31296U;
+uint32_t Hacl_Frodo976_crypto_secretkeybytes = 31296U;
 
-uint32_t Hacl_Frodo976_crypto_ciphertextbytes = (uint32_t)15744U;
+uint32_t Hacl_Frodo976_crypto_ciphertextbytes = 15744U;
 
 uint32_t Hacl_Frodo976_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[64U] = { 0U };
-  randombytes_((uint32_t)64U, coins);
+  randombytes_(64U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)24U;
-  uint8_t *z = coins + (uint32_t)48U;
+  uint8_t *seed_se = coins + 24U;
+  uint8_t *z = coins + 48U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake256_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)15656U;
+  Hacl_SHA3_shake256_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 15656U;
   uint16_t s_matrix[7808U] = { 0U };
   uint16_t e_matrix[7808U] = { 0U };
   uint8_t r[31232U] = { 0U };
   uint8_t shake_input_seed_se[25U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31232U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)976U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)976U,
-    (uint32_t)8U,
-    r + (uint32_t)15616U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(25U, shake_input_seed_se, 31232U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(976U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(976U, 8U, r + 15616U, e_matrix);
   uint16_t b_matrix[7808U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)952576U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 952576U);
   uint16_t a_matrix[952576U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)976U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)976U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)976U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)976U,
-    (uint32_t)8U,
-    (uint32_t)16U,
-    b_matrix,
-    b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)976U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)7808U, uint16_t);
-  uint32_t slen1 = (uint32_t)31272U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 976U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(976U, 976U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(976U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(976U, 8U, 16U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(976U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 7808U, uint16_t);
+  uint32_t slen1 = 31272U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)24U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)24U, pk, (uint32_t)15632U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)15632U, pk, (uint32_t)24U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)64U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 24U * sizeof (uint8_t));
+  memcpy(sk_p + 24U, pk, 15632U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(15632U, pk, 24U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 64U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo976_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[24U] = { 0U };
-  randombytes_((uint32_t)24U, coins);
+  randombytes_(24U, coins);
   uint8_t seed_se_k[48U] = { 0U };
   uint8_t pkh_mu[48U] = { 0U };
-  Hacl_SHA3_shake256_hacl((uint32_t)15632U, pk, (uint32_t)24U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)24U, coins, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)48U, pkh_mu, (uint32_t)48U, seed_se_k);
+  Hacl_SHA3_shake256_hacl(15632U, pk, 24U, pkh_mu);
+  memcpy(pkh_mu + 24U, coins, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(48U, pkh_mu, 48U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)24U;
+  uint8_t *k = seed_se_k + 24U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[7808U] = { 0U };
   uint16_t ep_matrix[7808U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[31360U] = { 0U };
   uint8_t shake_input_seed_se[25U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31360U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, (uint32_t)976U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)976U,
-    r + (uint32_t)15616U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)31232U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(25U, shake_input_seed_se, 31360U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r + 15616U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 8U, r + 31232U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)15616U;
+  uint8_t *c2 = ct + 15616U;
   uint16_t bp_matrix[7808U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)952576U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 952576U);
   uint16_t a_matrix[952576U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)976U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)976U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)976U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 976U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 976U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 976U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 976U, 16U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[7808U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)976U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(976U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)3U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)15768U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 3U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 16U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 15768U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t shake_input_ss[ss_init_len];
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)15744U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)15744U, k, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, (uint32_t)24U, ss);
+  memcpy(shake_input_ss, ct, 15744U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 15744U, k, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, 24U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)48U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)24U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t);
+  Lib_Memzero0_memzero(coins, 24U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -181,39 +143,30 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[7808U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)15616U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, c2, c_matrix);
+  uint8_t *c2 = ct + 15616U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 976U, 16U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 16U, c2, c_matrix);
   uint8_t mu_decode[24U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)15656U;
+  uint8_t *s_bytes = sk + 15656U;
   uint16_t s_matrix[7808U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)976U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)16U,
-    (uint32_t)3U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(976U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 976U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(16U, 3U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[48U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)48U;
+  uint32_t pkh_mu_decode_len = 48U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t pkh_mu_decode[pkh_mu_decode_len];
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)31272U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)24U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)24U, mu_decode, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)48U, seed_se_k);
+  uint8_t *pkh = sk + 31272U;
+  memcpy(pkh_mu_decode, pkh, 24U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 24U, mu_decode, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, 48U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)24U;
+  uint8_t *kp = seed_se_k + 24U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[7808U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -222,80 +175,58 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[31360U] = { 0U };
   uint8_t shake_input_seed_se[25U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31360U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, (uint32_t)976U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)976U,
-    r + (uint32_t)15616U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)31232U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)24U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(25U, shake_input_seed_se, 31360U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r + 15616U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 8U, r + 31232U, epp_matrix);
+  uint8_t *pk = sk + 24U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)952576U);
+  uint8_t *b = pk + 16U;
+  KRML_CHECK_SIZE(sizeof (uint16_t), 952576U);
   uint16_t a_matrix[952576U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)976U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)976U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)976U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 976U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 976U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 976U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[7808U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)976U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(976U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)3U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)976U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 3U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 976U, 16U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 16U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 976U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[24U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)24U; i++)
+  for (uint32_t i = 0U; i < 24U; i++)
   {
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;
   }
-  uint32_t ss_init_len = (uint32_t)15768U;
+  uint32_t ss_init_len = 15768U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t ss_init[ss_init_len];
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)15744U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)15744U, kp_s, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, (uint32_t)24U, ss);
+  memcpy(ss_init, ct, 15744U * sizeof (uint8_t));
+  memcpy(ss_init + 15744U, kp_s, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, 24U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)24U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)48U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)24U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 24U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 24U, uint8_t);
+  return 0U;
 }
 
diff --git a/src/Hacl_Frodo_KEM.c b/src/Hacl_Frodo_KEM.c
index 4265ac0e..e0a65a47 100644
--- a/src/Hacl_Frodo_KEM.c
+++ b/src/Hacl_Frodo_KEM.c
@@ -30,6 +30,6 @@
 
 void randombytes_(uint32_t len, uint8_t *res)
 {
-  KRML_HOST_IGNORE(Lib_RandomBuffer_System_randombytes(res, len));
+  Lib_RandomBuffer_System_randombytes(res, len);
 }
 
diff --git a/src/Hacl_GenericField32.c b/src/Hacl_GenericField32.c
index 8bd08a00..f509e6d4 100644
--- a/src/Hacl_GenericField32.c
+++ b/src/Hacl_GenericField32.c
@@ -56,7 +56,7 @@ Check whether this library will work for a modulus `n`.
 bool Hacl_GenericField32_field_modulus_check(uint32_t len, uint32_t *n)
 {
   uint32_t m = Hacl_Bignum_Montgomery_bn_check_modulus_u32(len, n);
-  return m == (uint32_t)0xFFFFFFFFU;
+  return m == 0xFFFFFFFFU;
 }
 
 /**
@@ -82,7 +82,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -283,27 +283,27 @@ Hacl_GenericField32_exp_consttime(
   uint32_t aMc[k1.len];
   memset(aMc, 0U, k1.len * sizeof (uint32_t));
   memcpy(aMc, aM, k1.len * sizeof (uint32_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t ctx[len1 + len1];
     memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint32_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len1; i++)
+      for (uint32_t i = 0U; i < len1; i++)
       {
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aMc[i]);
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aMc[i]);
         resM[i] = resM[i] ^ dummy;
         aMc[i] = aMc[i] ^ dummy;
       }
@@ -314,9 +314,9 @@ Hacl_GenericField32_exp_consttime(
       sw = bit;
     }
     uint32_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len1; i++)
+    for (uint32_t i = 0U; i < len1; i++)
     {
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aMc[i]);
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aMc[i]);
       resM[i] = resM[i] ^ dummy;
       aMc[i] = aMc[i] ^ dummy;
     }
@@ -324,22 +324,22 @@ Hacl_GenericField32_exp_consttime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 32U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t ctx[len1 + len1];
     memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint32_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len1);
-    uint32_t table[(uint32_t)16U * len1];
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint32_t));
+    KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len1);
+    uint32_t table[16U * len1];
+    memset(table, 0U, 16U * len1 * sizeof (uint32_t));
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t tmp[len1];
     memset(tmp, 0U, len1 * sizeof (uint32_t));
@@ -350,29 +350,29 @@ Hacl_GenericField32_exp_consttime(
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint32_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint32_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint32_t *t11 = table + (i + 1U) * len1;
       uint32_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint32_t));
-      uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint32_t));
+      uint32_t *t2 = table + (2U * i + 2U) * len1;
       uint32_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint32_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint32_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-      memcpy(resM, (uint32_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint32_t));
+      uint32_t i0 = bBits / 4U * 4U;
+      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+      memcpy(resM, (uint32_t *)(table + 0U * len1), len1 * sizeof (uint32_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-        const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+        const uint32_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint32_t *os = resM;
           uint32_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -388,24 +388,24 @@ Hacl_GenericField32_exp_consttime(
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t tmp0[len1];
     memset(tmp0, 0U, len1 * sizeof (uint32_t));
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
     {
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint32_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, (uint32_t)4U);
-      memcpy(tmp0, (uint32_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint32_t));
+      uint32_t k2 = bBits - bBits % 4U - 4U * i0 - 4U;
+      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, 4U);
+      memcpy(tmp0, (uint32_t *)(table + 0U * len1), len1 * sizeof (uint32_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-        const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+        const uint32_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint32_t *os = tmp0;
           uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -450,7 +450,7 @@ Hacl_GenericField32_exp_vartime(
   uint32_t aMc[k1.len];
   memset(aMc, 0U, k1.len * sizeof (uint32_t));
   memcpy(aMc, aM, k1.len * sizeof (uint32_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t ctx[len1 + len1];
@@ -460,13 +460,13 @@ Hacl_GenericField32_exp_vartime(
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n0, k1.mu, resM, aMc, resM);
@@ -478,22 +478,22 @@ Hacl_GenericField32_exp_vartime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 32U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t ctx[len1 + len1];
     memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint32_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len1);
-    uint32_t table[(uint32_t)16U * len1];
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint32_t));
+    KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len1);
+    uint32_t table[16U * len1];
+    memset(table, 0U, 16U * len1 * sizeof (uint32_t));
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t tmp[len1];
     memset(tmp, 0U, len1 * sizeof (uint32_t));
@@ -504,21 +504,21 @@ Hacl_GenericField32_exp_vartime(
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint32_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint32_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint32_t *t11 = table + (i + 1U) * len1;
       uint32_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint32_t));
-      uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint32_t));
+      uint32_t *t2 = table + (2U * i + 2U) * len1;
       uint32_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint32_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint32_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+      uint32_t i = bBits / 4U * 4U;
+      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
       uint32_t bits_l32 = bits_c;
       const uint32_t *a_bits_l = table + bits_l32 * len1;
       memcpy(resM, (uint32_t *)a_bits_l, len1 * sizeof (uint32_t));
@@ -532,16 +532,16 @@ Hacl_GenericField32_exp_vartime(
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t tmp0[len1];
     memset(tmp0, 0U, len1 * sizeof (uint32_t));
-    for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < bBits / 4U; i++)
     {
       KRML_MAYBE_FOR4(i0,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint32_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, (uint32_t)4U);
+      uint32_t k2 = bBits - bBits % 4U - 4U * i - 4U;
+      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, 4U);
       uint32_t bits_l32 = bits_l;
       const uint32_t *a_bits_l = table + bits_l32 * len1;
       memcpy(tmp0, (uint32_t *)a_bits_l, len1 * sizeof (uint32_t));
@@ -574,38 +574,33 @@ Hacl_GenericField32_inverse(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t n2[len1];
   memset(n2, 0U, len1 * sizeof (uint32_t));
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
   uint32_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint32_t *a1 = k1.n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t *a1 = k1.n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i);
     }
     uint32_t c10 = c;
     c1 = c10;
@@ -614,7 +609,7 @@ Hacl_GenericField32_inverse(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
-  Hacl_GenericField32_exp_vartime(k, aM, k1.len * (uint32_t)32U, n2, aInvM);
+  KRML_MAYBE_UNUSED_VAR(c1);
+  Hacl_GenericField32_exp_vartime(k, aM, k1.len * 32U, n2, aInvM);
 }
 
diff --git a/src/Hacl_GenericField64.c b/src/Hacl_GenericField64.c
index 7c11d3b7..3f291d36 100644
--- a/src/Hacl_GenericField64.c
+++ b/src/Hacl_GenericField64.c
@@ -55,7 +55,7 @@ Check whether this library will work for a modulus `n`.
 bool Hacl_GenericField64_field_modulus_check(uint32_t len, uint64_t *n)
 {
   uint64_t m = Hacl_Bignum_Montgomery_bn_check_modulus_u64(len, n);
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -81,7 +81,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint64_t));
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -282,27 +282,27 @@ Hacl_GenericField64_exp_consttime(
   uint64_t aMc[k1.len];
   memset(aMc, 0U, k1.len * sizeof (uint64_t));
   memcpy(aMc, aM, k1.len * sizeof (uint64_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t ctx[len1 + len1];
     memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint64_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len1; i++)
+      for (uint32_t i = 0U; i < len1; i++)
       {
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aMc[i]);
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aMc[i]);
         resM[i] = resM[i] ^ dummy;
         aMc[i] = aMc[i] ^ dummy;
       }
@@ -313,9 +313,9 @@ Hacl_GenericField64_exp_consttime(
       sw = bit;
     }
     uint64_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len1; i++)
+    for (uint32_t i = 0U; i < len1; i++)
     {
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aMc[i]);
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aMc[i]);
       resM[i] = resM[i] ^ dummy;
       aMc[i] = aMc[i] ^ dummy;
     }
@@ -323,22 +323,22 @@ Hacl_GenericField64_exp_consttime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 64U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t ctx[len1 + len1];
     memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint64_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len1);
-    uint64_t table[(uint32_t)16U * len1];
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint64_t));
+    KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len1);
+    uint64_t table[16U * len1];
+    memset(table, 0U, 16U * len1 * sizeof (uint64_t));
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t tmp[len1];
     memset(tmp, 0U, len1 * sizeof (uint64_t));
@@ -349,29 +349,29 @@ Hacl_GenericField64_exp_consttime(
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint64_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint64_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint64_t *t11 = table + (i + 1U) * len1;
       uint64_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint64_t));
-      uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint64_t));
+      uint64_t *t2 = table + (2U * i + 2U) * len1;
       uint64_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint64_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint64_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-      memcpy(resM, (uint64_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint64_t));
+      uint32_t i0 = bBits / 4U * 4U;
+      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+      memcpy(resM, (uint64_t *)(table + 0U * len1), len1 * sizeof (uint64_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-        const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+        const uint64_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint64_t *os = resM;
           uint64_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -387,24 +387,24 @@ Hacl_GenericField64_exp_consttime(
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t tmp0[len1];
     memset(tmp0, 0U, len1 * sizeof (uint64_t));
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
     {
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, (uint32_t)4U);
-      memcpy(tmp0, (uint64_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint64_t));
+      uint32_t k2 = bBits - bBits % 4U - 4U * i0 - 4U;
+      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, 4U);
+      memcpy(tmp0, (uint64_t *)(table + 0U * len1), len1 * sizeof (uint64_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-        const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+        const uint64_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint64_t *os = tmp0;
           uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -449,7 +449,7 @@ Hacl_GenericField64_exp_vartime(
   uint64_t aMc[k1.len];
   memset(aMc, 0U, k1.len * sizeof (uint64_t));
   memcpy(aMc, aM, k1.len * sizeof (uint64_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t ctx[len1 + len1];
@@ -459,13 +459,13 @@ Hacl_GenericField64_exp_vartime(
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n0, k1.mu, resM, aMc, resM);
@@ -477,22 +477,22 @@ Hacl_GenericField64_exp_vartime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 64U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t ctx[len1 + len1];
     memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint64_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len1);
-    uint64_t table[(uint32_t)16U * len1];
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint64_t));
+    KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len1);
+    uint64_t table[16U * len1];
+    memset(table, 0U, 16U * len1 * sizeof (uint64_t));
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t tmp[len1];
     memset(tmp, 0U, len1 * sizeof (uint64_t));
@@ -503,21 +503,21 @@ Hacl_GenericField64_exp_vartime(
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint64_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint64_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint64_t *t11 = table + (i + 1U) * len1;
       uint64_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint64_t));
-      uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint64_t));
+      uint64_t *t2 = table + (2U * i + 2U) * len1;
       uint64_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint64_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint64_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+      uint32_t i = bBits / 4U * 4U;
+      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
       uint32_t bits_l32 = (uint32_t)bits_c;
       const uint64_t *a_bits_l = table + bits_l32 * len1;
       memcpy(resM, (uint64_t *)a_bits_l, len1 * sizeof (uint64_t));
@@ -531,16 +531,16 @@ Hacl_GenericField64_exp_vartime(
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t tmp0[len1];
     memset(tmp0, 0U, len1 * sizeof (uint64_t));
-    for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < bBits / 4U; i++)
     {
       KRML_MAYBE_FOR4(i0,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, (uint32_t)4U);
+      uint32_t k2 = bBits - bBits % 4U - 4U * i - 4U;
+      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, 4U);
       uint32_t bits_l32 = (uint32_t)bits_l;
       const uint64_t *a_bits_l = table + bits_l32 * len1;
       memcpy(tmp0, (uint64_t *)a_bits_l, len1 * sizeof (uint64_t));
@@ -573,38 +573,33 @@ Hacl_GenericField64_inverse(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t n2[len1];
   memset(n2, 0U, len1 * sizeof (uint64_t));
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
   uint64_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint64_t *a1 = k1.n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t *a1 = k1.n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c10 = c;
     c1 = c10;
@@ -613,7 +608,7 @@ Hacl_GenericField64_inverse(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
-  Hacl_GenericField64_exp_vartime(k, aM, k1.len * (uint32_t)64U, n2, aInvM);
+  KRML_MAYBE_UNUSED_VAR(c1);
+  Hacl_GenericField64_exp_vartime(k, aM, k1.len * 64U, n2, aInvM);
 }
 
diff --git a/src/Hacl_HKDF.c b/src/Hacl_HKDF.c
index 6148337c..027b719f 100644
--- a/src/Hacl_HKDF.c
+++ b/src/Hacl_HKDF.c
@@ -45,39 +45,39 @@ Hacl_HKDF_expand_sha2_256(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -125,39 +125,39 @@ Hacl_HKDF_expand_sha2_384(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)48U;
+  uint32_t tlen = 48U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -205,39 +205,39 @@ Hacl_HKDF_expand_sha2_512(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -285,39 +285,39 @@ Hacl_HKDF_expand_blake2s_32(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -365,39 +365,39 @@ Hacl_HKDF_expand_blake2b_32(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/Hacl_HKDF_Blake2b_256.c b/src/Hacl_HKDF_Blake2b_256.c
index 0d28292a..fe89115d 100644
--- a/src/Hacl_HKDF_Blake2b_256.c
+++ b/src/Hacl_HKDF_Blake2b_256.c
@@ -45,47 +45,39 @@ Hacl_HKDF_Blake2b_256_expand_blake2b_256(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/Hacl_HKDF_Blake2s_128.c b/src/Hacl_HKDF_Blake2s_128.c
index ec1e727e..4c9e9450 100644
--- a/src/Hacl_HKDF_Blake2s_128.c
+++ b/src/Hacl_HKDF_Blake2s_128.c
@@ -45,47 +45,39 @@ Hacl_HKDF_Blake2s_128_expand_blake2s_128(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/Hacl_HMAC.c b/src/Hacl_HMAC.c
index 7b4b36f0..b766775c 100644
--- a/src/Hacl_HMAC.c
+++ b/src/Hacl_HMAC.c
@@ -45,23 +45,23 @@ Hacl_HMAC_legacy_compute_sha1(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)20U;
+    ite = 20U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -71,42 +71,37 @@ Hacl_HMAC_legacy_compute_sha1(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
-  uint32_t
-  s[5U] =
-    {
-      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-      (uint32_t)0xc3d2e1f0U
-    };
+  uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)0U, ipad, (uint32_t)64U);
+    Hacl_Hash_SHA1_legacy_update_last(s, 0ULL, ipad, 64U);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -118,25 +113,21 @@ Hacl_HMAC_legacy_compute_sha1(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_Hash_SHA1_legacy_update_multi(s, ipad, (uint32_t)1U);
+    Hacl_Hash_SHA1_legacy_update_multi(s, ipad, 1U);
     Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-    Hacl_Hash_SHA1_legacy_update_last(s,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-      rem,
-      rem_len);
+    Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
   }
   Hacl_Hash_Core_SHA1_legacy_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_Hash_Core_SHA1_legacy_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)20U / block_len;
-  uint32_t rem0 = (uint32_t)20U % block_len;
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 20U / block_len;
+  uint32_t rem0 = 20U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)20U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 20U - n_blocks_ * block_len });
   }
   else
   {
@@ -147,12 +138,9 @@ Hacl_HMAC_legacy_compute_sha1(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_Hash_SHA1_legacy_update_multi(s, opad, (uint32_t)1U);
+  Hacl_Hash_SHA1_legacy_update_multi(s, opad, 1U);
   Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-  Hacl_Hash_SHA1_legacy_update_last(s,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-    rem,
-    rem_len);
+  Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
   Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
 }
 
@@ -171,23 +159,23 @@ Hacl_HMAC_compute_sha2_256(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -197,48 +185,45 @@ Hacl_HMAC_compute_sha2_256(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
     uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
     os[i] = x;);
   uint32_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)0U + (uint64_t)(uint32_t)64U,
-      (uint32_t)64U,
-      ipad,
-      s);
+    Hacl_SHA2_Scalar32_sha256_update_last(0ULL + (uint64_t)64U, 64U, ipad, s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -250,9 +235,9 @@ Hacl_HMAC_compute_sha2_256(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, ipad, s);
-    Hacl_SHA2_Scalar32_sha256_update_nblocks(n_blocks * (uint32_t)64U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
+    Hacl_SHA2_Scalar32_sha256_update_nblocks(64U, ipad, s);
+    Hacl_SHA2_Scalar32_sha256_update_nblocks(n_blocks * 64U, full_blocks, s);
+    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)64U
       + (uint64_t)full_blocks_len
       + (uint64_t)rem_len,
       rem_len,
@@ -262,15 +247,14 @@ Hacl_HMAC_compute_sha2_256(
   Hacl_SHA2_Scalar32_sha256_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_SHA2_Scalar32_sha256_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -281,9 +265,9 @@ Hacl_HMAC_compute_sha2_256(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, opad, s);
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(n_blocks * (uint32_t)64U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
+  Hacl_SHA2_Scalar32_sha256_update_nblocks(64U, opad, s);
+  Hacl_SHA2_Scalar32_sha256_update_nblocks(n_blocks * 64U, full_blocks, s);
+  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)64U
     + (uint64_t)full_blocks_len
     + (uint64_t)rem_len,
     rem_len,
@@ -307,23 +291,23 @@ Hacl_HMAC_compute_sha2_384(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)48U;
+    ite = 48U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -333,49 +317,49 @@ Hacl_HMAC_compute_sha2_384(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
     uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -387,9 +371,9 @@ Hacl_HMAC_compute_sha2_384(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_SHA2_Scalar32_sha384_update_nblocks(128U, ipad, s);
+    Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
@@ -399,15 +383,14 @@ Hacl_HMAC_compute_sha2_384(
   Hacl_SHA2_Scalar32_sha384_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_SHA2_Scalar32_sha384_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)48U / block_len;
-  uint32_t rem0 = (uint32_t)48U % block_len;
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 48U / block_len;
+  uint32_t rem0 = 48U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)48U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 48U - n_blocks_ * block_len });
   }
   else
   {
@@ -418,9 +401,9 @@ Hacl_HMAC_compute_sha2_384(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_SHA2_Scalar32_sha384_update_nblocks(128U, opad, s);
+  Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
@@ -444,23 +427,23 @@ Hacl_HMAC_compute_sha2_512(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -470,49 +453,49 @@ Hacl_HMAC_compute_sha2_512(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
     uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -524,9 +507,9 @@ Hacl_HMAC_compute_sha2_512(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(128U, ipad, s);
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
@@ -536,15 +519,14 @@ Hacl_HMAC_compute_sha2_512(
   Hacl_SHA2_Scalar32_sha512_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_SHA2_Scalar32_sha512_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -555,9 +537,9 @@ Hacl_HMAC_compute_sha2_512(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_SHA2_Scalar32_sha512_update_nblocks(128U, opad, s);
+  Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
@@ -581,66 +563,66 @@ Hacl_HMAC_compute_blake2s_32(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2s_32_blake2s((uint32_t)32U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Blake2s_32_blake2s(32U, nkey, key_len, key, 0U, NULL);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t s[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_init(s, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_32_blake2s_init(s, 0U, 32U);
   uint32_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last((uint32_t)64U, wv, s0, (uint64_t)0U, (uint32_t)64U, ipad);
+    Hacl_Blake2s_32_blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -653,9 +635,9 @@ Hacl_HMAC_compute_blake2s_32(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, ipad, (uint32_t)1U);
+    Hacl_Blake2s_32_blake2s_update_multi(64U, wv, s0, 0ULL, ipad, 1U);
     uint32_t wv0[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+    Hacl_Blake2s_32_blake2s_update_multi(n_blocks * 64U,
       wv0,
       s0,
       (uint64_t)block_len,
@@ -665,22 +647,21 @@ Hacl_HMAC_compute_blake2s_32(
     Hacl_Blake2s_32_blake2s_update_last(rem_len,
       wv1,
       s0,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+      (uint64_t)64U + (uint64_t)full_blocks_len,
       rem_len,
       rem);
   }
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst1, s0);
+  Hacl_Blake2s_32_blake2s_finish(32U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2s_32_blake2s_init(s0, (uint32_t)0U, (uint32_t)32U);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Blake2s_32_blake2s_init(s0, 0U, 32U);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -692,9 +673,9 @@ Hacl_HMAC_compute_blake2s_32(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint32_t wv[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, opad, (uint32_t)1U);
+  Hacl_Blake2s_32_blake2s_update_multi(64U, wv, s0, 0ULL, opad, 1U);
   uint32_t wv0[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+  Hacl_Blake2s_32_blake2s_update_multi(n_blocks * 64U,
     wv0,
     s0,
     (uint64_t)block_len,
@@ -704,10 +685,10 @@ Hacl_HMAC_compute_blake2s_32(
   Hacl_Blake2s_32_blake2s_update_last(rem_len,
     wv1,
     s0,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+    (uint64_t)64U + (uint64_t)full_blocks_len,
     rem_len,
     rem);
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, s0);
+  Hacl_Blake2s_32_blake2s_finish(32U, dst, s0);
 }
 
 /**
@@ -725,71 +706,71 @@ Hacl_HMAC_compute_blake2b_32(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2b_32_blake2b((uint32_t)64U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Blake2b_32_blake2b(64U, nkey, key_len, key, 0U, NULL);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t s[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_init(s, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_32_blake2b_init(s, 0U, 64U);
   uint64_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last((uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_last(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      (uint32_t)128U,
+      FStar_UInt128_uint64_to_uint128(0ULL),
+      128U,
       ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -802,14 +783,14 @@ Hacl_HMAC_compute_blake2b_32(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_multi(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+      FStar_UInt128_uint64_to_uint128(0ULL),
       ipad,
-      (uint32_t)1U);
+      1U);
     uint64_t wv0[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_multi(n_blocks * 128U,
       wv0,
       s0,
       FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
@@ -819,23 +800,22 @@ Hacl_HMAC_compute_blake2b_32(
     Hacl_Blake2b_32_blake2b_update_last(rem_len,
       wv1,
       s0,
-      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       rem_len,
       rem);
   }
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst1, s0);
+  Hacl_Blake2b_32_blake2b_finish(64U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2b_32_blake2b_init(s0, (uint32_t)0U, (uint32_t)64U);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Blake2b_32_blake2b_init(s0, 0U, 64U);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -847,14 +827,14 @@ Hacl_HMAC_compute_blake2b_32(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint64_t wv[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
+  Hacl_Blake2b_32_blake2b_update_multi(128U,
     wv,
     s0,
-    FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+    FStar_UInt128_uint64_to_uint128(0ULL),
     opad,
-    (uint32_t)1U);
+    1U);
   uint64_t wv0[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+  Hacl_Blake2b_32_blake2b_update_multi(n_blocks * 128U,
     wv0,
     s0,
     FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
@@ -864,10 +844,10 @@ Hacl_HMAC_compute_blake2b_32(
   Hacl_Blake2b_32_blake2b_update_last(rem_len,
     wv1,
     s0,
-    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
       FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
     rem_len,
     rem);
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, s0);
+  Hacl_Blake2b_32_blake2b_finish(64U, dst, s0);
 }
 
diff --git a/src/Hacl_HMAC_Blake2b_256.c b/src/Hacl_HMAC_Blake2b_256.c
index 71f75415..d9a0bef2 100644
--- a/src/Hacl_HMAC_Blake2b_256.c
+++ b/src/Hacl_HMAC_Blake2b_256.c
@@ -43,71 +43,71 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2b_256_blake2b((uint32_t)64U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Blake2b_256_blake2b(64U, nkey, key_len, key, 0U, NULL);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_init(s, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_256_blake2b_init(s, 0U, 64U);
   Lib_IntVector_Intrinsics_vec256 *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_last((uint32_t)128U,
+    Hacl_Blake2b_256_blake2b_update_last(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      (uint32_t)128U,
+      FStar_UInt128_uint64_to_uint128(0ULL),
+      128U,
       ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -120,14 +120,14 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
+    Hacl_Blake2b_256_blake2b_update_multi(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+      FStar_UInt128_uint64_to_uint128(0ULL),
       ipad,
-      (uint32_t)1U);
+      1U);
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_multi(n_blocks * (uint32_t)128U,
+    Hacl_Blake2b_256_blake2b_update_multi(n_blocks * 128U,
       wv0,
       s0,
       FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
@@ -137,23 +137,22 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
     Hacl_Blake2b_256_blake2b_update_last(rem_len,
       wv1,
       s0,
-      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       rem_len,
       rem);
   }
-  Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst1, s0);
+  Hacl_Blake2b_256_blake2b_finish(64U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2b_256_blake2b_init(s0, (uint32_t)0U, (uint32_t)64U);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Blake2b_256_blake2b_init(s0, 0U, 64U);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -165,14 +164,14 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
+  Hacl_Blake2b_256_blake2b_update_multi(128U,
     wv,
     s0,
-    FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+    FStar_UInt128_uint64_to_uint128(0ULL),
     opad,
-    (uint32_t)1U);
+    1U);
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_update_multi(n_blocks * (uint32_t)128U,
+  Hacl_Blake2b_256_blake2b_update_multi(n_blocks * 128U,
     wv0,
     s0,
     FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
@@ -182,10 +181,10 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
   Hacl_Blake2b_256_blake2b_update_last(rem_len,
     wv1,
     s0,
-    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
       FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
     rem_len,
     rem);
-  Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, s0);
+  Hacl_Blake2b_256_blake2b_finish(64U, dst, s0);
 }
 
diff --git a/src/Hacl_HMAC_Blake2s_128.c b/src/Hacl_HMAC_Blake2s_128.c
index bce00309..93a911a7 100644
--- a/src/Hacl_HMAC_Blake2s_128.c
+++ b/src/Hacl_HMAC_Blake2s_128.c
@@ -42,66 +42,66 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2s_128_blake2s((uint32_t)32U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Blake2s_128_blake2s(32U, nkey, key_len, key, 0U, NULL);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 s[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_init(s, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_128_blake2s_init(s, 0U, 32U);
   Lib_IntVector_Intrinsics_vec128 *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_last((uint32_t)64U, wv, s0, (uint64_t)0U, (uint32_t)64U, ipad);
+    Hacl_Blake2s_128_blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -114,9 +114,9 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, ipad, (uint32_t)1U);
+    Hacl_Blake2s_128_blake2s_update_multi(64U, wv, s0, 0ULL, ipad, 1U);
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_multi(n_blocks * (uint32_t)64U,
+    Hacl_Blake2s_128_blake2s_update_multi(n_blocks * 64U,
       wv0,
       s0,
       (uint64_t)block_len,
@@ -126,22 +126,21 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
     Hacl_Blake2s_128_blake2s_update_last(rem_len,
       wv1,
       s0,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+      (uint64_t)64U + (uint64_t)full_blocks_len,
       rem_len,
       rem);
   }
-  Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst1, s0);
+  Hacl_Blake2s_128_blake2s_finish(32U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2s_128_blake2s_init(s0, (uint32_t)0U, (uint32_t)32U);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Blake2s_128_blake2s_init(s0, 0U, 32U);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -153,9 +152,9 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, opad, (uint32_t)1U);
+  Hacl_Blake2s_128_blake2s_update_multi(64U, wv, s0, 0ULL, opad, 1U);
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_update_multi(n_blocks * (uint32_t)64U,
+  Hacl_Blake2s_128_blake2s_update_multi(n_blocks * 64U,
     wv0,
     s0,
     (uint64_t)block_len,
@@ -165,9 +164,9 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
   Hacl_Blake2s_128_blake2s_update_last(rem_len,
     wv1,
     s0,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+    (uint64_t)64U + (uint64_t)full_blocks_len,
     rem_len,
     rem);
-  Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, s0);
+  Hacl_Blake2s_128_blake2s_finish(32U, dst, s0);
 }
 
diff --git a/src/Hacl_HMAC_DRBG.c b/src/Hacl_HMAC_DRBG.c
index 0a09aaed..366f1b1a 100644
--- a/src/Hacl_HMAC_DRBG.c
+++ b/src/Hacl_HMAC_DRBG.c
@@ -25,15 +25,15 @@
 
 #include "Hacl_HMAC_DRBG.h"
 
-uint32_t Hacl_HMAC_DRBG_reseed_interval = (uint32_t)1024U;
+uint32_t Hacl_HMAC_DRBG_reseed_interval = 1024U;
 
-uint32_t Hacl_HMAC_DRBG_max_output_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_output_length = 65536U;
 
-uint32_t Hacl_HMAC_DRBG_max_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_length = 65536U;
 
-uint32_t Hacl_HMAC_DRBG_max_personalization_string_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_personalization_string_length = 65536U;
 
-uint32_t Hacl_HMAC_DRBG_max_additional_input_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_additional_input_length = 65536U;
 
 /**
 Return the minimal entropy input length of the desired hash function.
@@ -46,19 +46,19 @@ uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     default:
       {
@@ -71,8 +71,8 @@ uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
 bool
 Hacl_HMAC_DRBG_uu___is_State(Spec_Hash_Definitions_hash_alg a, Hacl_HMAC_DRBG_state projectee)
 {
-  KRML_HOST_IGNORE(a);
-  KRML_HOST_IGNORE(projectee);
+  KRML_MAYBE_UNUSED_VAR(a);
+  KRML_MAYBE_UNUSED_VAR(projectee);
   return true;
 }
 
@@ -92,25 +92,25 @@ Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
         k = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
         k = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
         k = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
         k = buf;
         break;
       }
@@ -125,25 +125,25 @@ Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
         v = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
         v = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
         v = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
         v = buf;
         break;
       }
@@ -154,7 +154,7 @@ Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a)
       }
   }
   uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return ((Hacl_HMAC_DRBG_state){ .k = k, .v = v, .reseed_counter = ctr });
 }
 
@@ -200,45 +200,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)20U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)20U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 20U * sizeof (uint8_t));
+        memset(v, 1U, 20U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 21U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 20U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)21U,
+          memcpy(input0 + 21U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[20U] = (uint8_t)0U;
-        Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-        Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[20U] = 0U;
+        Hacl_HMAC_legacy_compute_sha1(k_, k, 20U, input0, input_len);
+        Hacl_HMAC_legacy_compute_sha1(v, k_, 20U, v, 20U);
+        memcpy(k, k_, 20U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 21U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 20U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)21U,
+            memcpy(input + 21U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[20U] = (uint8_t)1U;
-          Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-          Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+          input[20U] = 1U;
+          Hacl_HMAC_legacy_compute_sha1(k_0, k, 20U, input, input_len0);
+          Hacl_HMAC_legacy_compute_sha1(v, k_0, 20U, v, 20U);
+          memcpy(k, k_0, 20U * sizeof (uint8_t));
         }
         break;
       }
@@ -258,45 +256,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)32U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)32U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 32U * sizeof (uint8_t));
+        memset(v, 1U, 32U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 33U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 32U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)33U,
+          memcpy(input0 + 33U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[32U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-        Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[32U] = 0U;
+        Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+        Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+        memcpy(k, k_, 32U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 33U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 32U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)33U,
+            memcpy(input + 33U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[32U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-          Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+          input[32U] = 1U;
+          Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+          Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+          memcpy(k, k_0, 32U * sizeof (uint8_t));
         }
         break;
       }
@@ -316,45 +312,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)48U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)48U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 48U * sizeof (uint8_t));
+        memset(v, 1U, 48U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 49U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 48U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)49U,
+          memcpy(input0 + 49U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[48U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-        Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[48U] = 0U;
+        Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+        Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+        memcpy(k, k_, 48U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 49U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 48U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)49U,
+            memcpy(input + 49U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[48U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-          Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+          input[48U] = 1U;
+          Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+          Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+          memcpy(k, k_0, 48U * sizeof (uint8_t));
         }
         break;
       }
@@ -374,45 +368,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)64U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)64U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 64U * sizeof (uint8_t));
+        memset(v, 1U, 64U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 65U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 64U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)65U,
+          memcpy(input0 + 65U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[64U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-        Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[64U] = 0U;
+        Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+        Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+        memcpy(k, k_, 64U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 65U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 64U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)65U,
+            memcpy(input + 65U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[64U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-          Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+          input[64U] = 1U;
+          Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+          Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+          memcpy(k, k_0, 64U * sizeof (uint8_t));
         }
         break;
       }
@@ -460,42 +452,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 21U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 20U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)21U,
+          memcpy(input0 + 21U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[20U] = (uint8_t)0U;
-        Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-        Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[20U] = 0U;
+        Hacl_HMAC_legacy_compute_sha1(k_, k, 20U, input0, input_len);
+        Hacl_HMAC_legacy_compute_sha1(v, k_, 20U, v, 20U);
+        memcpy(k, k_, 20U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)21U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 21U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 20U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)21U,
+            memcpy(input + 21U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[20U] = (uint8_t)1U;
-          Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-          Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+          input[20U] = 1U;
+          Hacl_HMAC_legacy_compute_sha1(k_0, k, 20U, input, input_len0);
+          Hacl_HMAC_legacy_compute_sha1(v, k_0, 20U, v, 20U);
+          memcpy(k, k_0, 20U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
@@ -512,42 +504,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 33U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 32U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)33U,
+          memcpy(input0 + 33U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[32U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-        Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[32U] = 0U;
+        Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+        Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+        memcpy(k, k_, 32U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)33U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 33U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 32U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)33U,
+            memcpy(input + 33U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[32U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-          Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+          input[32U] = 1U;
+          Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+          Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+          memcpy(k, k_0, 32U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
@@ -564,42 +556,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 49U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 48U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)49U,
+          memcpy(input0 + 49U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[48U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-        Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[48U] = 0U;
+        Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+        Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+        memcpy(k, k_, 48U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)49U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 49U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 48U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)49U,
+            memcpy(input + 49U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[48U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-          Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+          input[48U] = 1U;
+          Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+          Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+          memcpy(k, k_0, 48U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
@@ -616,42 +608,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 65U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 64U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)65U,
+          memcpy(input0 + 65U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[64U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-        Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[64U] = 0U;
+        Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+        Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+        memcpy(k, k_, 64U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)65U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 65U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 64U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)65U,
+            memcpy(input + 65U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[64U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-          Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+          input[64U] = 1U;
+          Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+          Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+          memcpy(k, k_0, 64U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     default:
@@ -693,93 +685,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)21U + additional_input_len;
+          uint32_t input_len = 21U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t input0[input_len];
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 20U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)21U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[20U] = (uint8_t)0U;
-          Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-          Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[20U] = 0U;
+          Hacl_HMAC_legacy_compute_sha1(k_, k, 20U, input0, input_len);
+          Hacl_HMAC_legacy_compute_sha1(v, k_, 20U, v, 20U);
+          memcpy(k, k_, 20U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+            uint32_t input_len0 = 21U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t input[input_len0];
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 20U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)21U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[20U] = (uint8_t)1U;
-            Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-            Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-            memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+            input[20U] = 1U;
+            Hacl_HMAC_legacy_compute_sha1(k_0, k, 20U, input, input_len0);
+            Hacl_HMAC_legacy_compute_sha1(v, k_0, 20U, v, 20U);
+            memcpy(k, k_0, 20U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)20U;
+        uint32_t max = n / 20U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_legacy_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(out + i * (uint32_t)20U, v, (uint32_t)20U * sizeof (uint8_t));
+          Hacl_HMAC_legacy_compute_sha1(v, k, 20U, v, 20U);
+          memcpy(out + i * 20U, v, 20U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)20U < n)
+        if (max * 20U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)20U;
-          Hacl_HMAC_legacy_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(block, v, (n - max * (uint32_t)20U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 20U;
+          Hacl_HMAC_legacy_compute_sha1(v, k, 20U, v, 20U);
+          memcpy(block, v, (n - max * 20U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)21U + additional_input_len;
+        uint32_t input_len = 21U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 20U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[20U] = (uint8_t)0U;
-        Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-        Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[20U] = 0U;
+        Hacl_HMAC_legacy_compute_sha1(k_, k, 20U, input0, input_len);
+        Hacl_HMAC_legacy_compute_sha1(v, k_, 20U, v, 20U);
+        memcpy(k, k_, 20U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+          uint32_t input_len0 = 21U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 20U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)21U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[20U] = (uint8_t)1U;
-          Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-          Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+          input[20U] = 1U;
+          Hacl_HMAC_legacy_compute_sha1(k_0, k, 20U, input, input_len0);
+          Hacl_HMAC_legacy_compute_sha1(v, k_0, 20U, v, 20U);
+          memcpy(k, k_0, 20U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     case Spec_Hash_Definitions_SHA2_256:
@@ -791,93 +777,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)33U + additional_input_len;
+          uint32_t input_len = 33U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t input0[input_len];
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 32U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)33U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[32U] = (uint8_t)0U;
-          Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-          Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[32U] = 0U;
+          Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+          Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+          memcpy(k, k_, 32U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+            uint32_t input_len0 = 33U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t input[input_len0];
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 32U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)33U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[32U] = (uint8_t)1U;
-            Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-            Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-            memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+            input[32U] = 1U;
+            Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+            Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+            memcpy(k, k_0, 32U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)32U;
+        uint32_t max = n / 32U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(out + i * (uint32_t)32U, v, (uint32_t)32U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+          memcpy(out + i * 32U, v, 32U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)32U < n)
+        if (max * 32U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)32U;
-          Hacl_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(block, v, (n - max * (uint32_t)32U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 32U;
+          Hacl_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+          memcpy(block, v, (n - max * 32U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)33U + additional_input_len;
+        uint32_t input_len = 33U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 32U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[32U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-        Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[32U] = 0U;
+        Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+        Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+        memcpy(k, k_, 32U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+          uint32_t input_len0 = 33U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 32U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)33U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[32U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-          Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+          input[32U] = 1U;
+          Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+          Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+          memcpy(k, k_0, 32U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     case Spec_Hash_Definitions_SHA2_384:
@@ -889,93 +869,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)49U + additional_input_len;
+          uint32_t input_len = 49U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t input0[input_len];
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 48U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)49U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[48U] = (uint8_t)0U;
-          Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-          Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[48U] = 0U;
+          Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+          Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+          memcpy(k, k_, 48U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+            uint32_t input_len0 = 49U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t input[input_len0];
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 48U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)49U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[48U] = (uint8_t)1U;
-            Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-            Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-            memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+            input[48U] = 1U;
+            Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+            Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+            memcpy(k, k_0, 48U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)48U;
+        uint32_t max = n / 48U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(out + i * (uint32_t)48U, v, (uint32_t)48U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+          memcpy(out + i * 48U, v, 48U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)48U < n)
+        if (max * 48U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)48U;
-          Hacl_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(block, v, (n - max * (uint32_t)48U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 48U;
+          Hacl_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+          memcpy(block, v, (n - max * 48U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)49U + additional_input_len;
+        uint32_t input_len = 49U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 48U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[48U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-        Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[48U] = 0U;
+        Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+        Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+        memcpy(k, k_, 48U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+          uint32_t input_len0 = 49U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 48U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)49U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[48U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-          Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+          input[48U] = 1U;
+          Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+          Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+          memcpy(k, k_0, 48U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     case Spec_Hash_Definitions_SHA2_512:
@@ -987,93 +961,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)65U + additional_input_len;
+          uint32_t input_len = 65U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t input0[input_len];
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 64U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)65U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[64U] = (uint8_t)0U;
-          Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-          Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[64U] = 0U;
+          Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+          Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+          memcpy(k, k_, 64U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+            uint32_t input_len0 = 65U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t input[input_len0];
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 64U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)65U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[64U] = (uint8_t)1U;
-            Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-            Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-            memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+            input[64U] = 1U;
+            Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+            Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+            memcpy(k, k_0, 64U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)64U;
+        uint32_t max = n / 64U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(out + i * (uint32_t)64U, v, (uint32_t)64U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+          memcpy(out + i * 64U, v, 64U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)64U < n)
+        if (max * 64U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)64U;
-          Hacl_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(block, v, (n - max * (uint32_t)64U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 64U;
+          Hacl_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+          memcpy(block, v, (n - max * 64U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)65U + additional_input_len;
+        uint32_t input_len = 65U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 64U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[64U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-        Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[64U] = 0U;
+        Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+        Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+        memcpy(k, k_, 64U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+          uint32_t input_len0 = 65U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 64U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)65U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[64U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-          Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+          input[64U] = 1U;
+          Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+          Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+          memcpy(k, k_0, 64U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     default:
@@ -1086,7 +1054,7 @@ Hacl_HMAC_DRBG_generate(
 
 void Hacl_HMAC_DRBG_free(Spec_Hash_Definitions_hash_alg uu___, Hacl_HMAC_DRBG_state s)
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
diff --git a/src/Hacl_HPKE_Curve51_CP128_SHA256.c b/src/Hacl_HPKE_Curve51_CP128_SHA256.c
index 5814ae67..2c7f5c7c 100644
--- a/src/Hacl_HPKE_Curve51_CP128_SHA256.c
+++ b/src/Hacl_HPKE_Curve51_CP128_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP128_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP128_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve51_CP128_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve51_CP128_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve51_CP128_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve51_CP128_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve51_CP128_SHA512.c b/src/Hacl_HPKE_Curve51_CP128_SHA512.c
index c6aff2e5..f9be34c9 100644
--- a/src/Hacl_HPKE_Curve51_CP128_SHA512.c
+++ b/src/Hacl_HPKE_Curve51_CP128_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP128_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP128_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve51_CP128_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve51_CP128_SHA512_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve51_CP128_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve51_CP128_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP128_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve51_CP256_SHA256.c b/src/Hacl_HPKE_Curve51_CP256_SHA256.c
index 4c448589..aef25086 100644
--- a/src/Hacl_HPKE_Curve51_CP256_SHA256.c
+++ b/src/Hacl_HPKE_Curve51_CP256_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP256_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP256_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve51_CP256_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve51_CP256_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve51_CP256_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve51_CP256_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve51_CP256_SHA512.c b/src/Hacl_HPKE_Curve51_CP256_SHA512.c
index 1ee26ea0..90c4bf06 100644
--- a/src/Hacl_HPKE_Curve51_CP256_SHA512.c
+++ b/src/Hacl_HPKE_Curve51_CP256_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP256_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP256_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve51_CP256_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve51_CP256_SHA512_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve51_CP256_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve51_CP256_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP256_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve51_CP32_SHA256.c b/src/Hacl_HPKE_Curve51_CP32_SHA256.c
index bc59f64a..903d3feb 100644
--- a/src/Hacl_HPKE_Curve51_CP32_SHA256.c
+++ b/src/Hacl_HPKE_Curve51_CP32_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP32_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP32_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,19 +552,19 @@ Hacl_HPKE_Curve51_CP32_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -630,20 +575,20 @@ Hacl_HPKE_Curve51_CP32_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +606,7 @@ Hacl_HPKE_Curve51_CP32_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +617,42 @@ Hacl_HPKE_Curve51_CP32_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve51_CP32_SHA512.c b/src/Hacl_HPKE_Curve51_CP32_SHA512.c
index 0314c71c..2b3933db 100644
--- a/src/Hacl_HPKE_Curve51_CP32_SHA512.c
+++ b/src/Hacl_HPKE_Curve51_CP32_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP32_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP32_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,19 +552,19 @@ Hacl_HPKE_Curve51_CP32_SHA512_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -630,20 +575,20 @@ Hacl_HPKE_Curve51_CP32_SHA512_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +606,7 @@ Hacl_HPKE_Curve51_CP32_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +617,42 @@ Hacl_HPKE_Curve51_CP32_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve64_CP128_SHA256.c b/src/Hacl_HPKE_Curve64_CP128_SHA256.c
index c22d5a64..d7f21be9 100644
--- a/src/Hacl_HPKE_Curve64_CP128_SHA256.c
+++ b/src/Hacl_HPKE_Curve64_CP128_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP128_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP128_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve64_CP128_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve64_CP128_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve64_CP128_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve64_CP128_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve64_CP128_SHA512.c b/src/Hacl_HPKE_Curve64_CP128_SHA512.c
index d01bc1f8..cc3faff0 100644
--- a/src/Hacl_HPKE_Curve64_CP128_SHA512.c
+++ b/src/Hacl_HPKE_Curve64_CP128_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP128_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP128_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve64_CP128_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve64_CP128_SHA512_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve64_CP128_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve64_CP128_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP128_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve64_CP256_SHA256.c b/src/Hacl_HPKE_Curve64_CP256_SHA256.c
index 6de7db47..f1eaa458 100644
--- a/src/Hacl_HPKE_Curve64_CP256_SHA256.c
+++ b/src/Hacl_HPKE_Curve64_CP256_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP256_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP256_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve64_CP256_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve64_CP256_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve64_CP256_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve64_CP256_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve64_CP256_SHA512.c b/src/Hacl_HPKE_Curve64_CP256_SHA512.c
index 146b64eb..4e768938 100644
--- a/src/Hacl_HPKE_Curve64_CP256_SHA512.c
+++ b/src/Hacl_HPKE_Curve64_CP256_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP256_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP256_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve64_CP256_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve64_CP256_SHA512_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve64_CP256_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve64_CP256_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP256_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve64_CP32_SHA256.c b/src/Hacl_HPKE_Curve64_CP32_SHA256.c
index c7f168bb..71db119b 100644
--- a/src/Hacl_HPKE_Curve64_CP32_SHA256.c
+++ b/src/Hacl_HPKE_Curve64_CP32_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP32_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP32_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,19 +552,19 @@ Hacl_HPKE_Curve64_CP32_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -630,20 +575,20 @@ Hacl_HPKE_Curve64_CP32_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +606,7 @@ Hacl_HPKE_Curve64_CP32_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +617,42 @@ Hacl_HPKE_Curve64_CP32_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve64_CP32_SHA512.c b/src/Hacl_HPKE_Curve64_CP32_SHA512.c
index 39e1a267..84d44811 100644
--- a/src/Hacl_HPKE_Curve64_CP32_SHA512.c
+++ b/src/Hacl_HPKE_Curve64_CP32_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP32_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP32_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,19 +552,19 @@ Hacl_HPKE_Curve64_CP32_SHA512_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -630,20 +575,20 @@ Hacl_HPKE_Curve64_CP32_SHA512_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +606,7 @@ Hacl_HPKE_Curve64_CP32_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +617,42 @@ Hacl_HPKE_Curve64_CP32_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_P256_CP128_SHA256.c b/src/Hacl_HPKE_P256_CP128_SHA256.c
index 5320f1f5..5b3e2fe0 100644
--- a/src/Hacl_HPKE_P256_CP128_SHA256.c
+++ b/src/Hacl_HPKE_P256_CP128_SHA256.c
@@ -38,267 +38,239 @@ Hacl_HPKE_P256_CP128_SHA256_setupBaseS(
 )
 {
   uint8_t o_shared[32U] = { 0U };
-  uint8_t *o_pkE1 = o_pkE + (uint32_t)1U;
+  uint8_t *o_pkE1 = o_pkE + 1U;
   bool res0 = Hacl_Impl_P256_DH_ecp256dh_i(o_pkE1, skE);
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint32_t res3;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    o_pkE[0U] = (uint8_t)4U;
+    o_pkE[0U] = 4U;
     uint8_t o_dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkR, skE);
-    memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(o_dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res2;
     if (res)
     {
-      res2 = (uint32_t)0U;
+      res2 = 0U;
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
     uint8_t o_kemcontext[130U] = { 0U };
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U;
-      uint8_t *o_pkR = o_pkRm + (uint32_t)1U;
-      memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t));
-      o_pkRm[0U] = (uint8_t)4U;
+      memcpy(o_kemcontext, o_pkE, 65U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 65U;
+      uint8_t *o_pkR = o_pkRm + 1U;
+      memcpy(o_pkR, pkR, 64U * sizeof (uint8_t));
+      o_pkRm[0U] = 4U;
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____0 = suite_id_kem;
-      uu____0[0U] = (uint8_t)0x4bU;
-      uu____0[1U] = (uint8_t)0x45U;
-      uu____0[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-      uu____1[0U] = (uint8_t)0U;
-      uu____1[1U] = (uint8_t)16U;
+      uu____0[0U] = 0x4bU;
+      uu____0[1U] = 0x45U;
+      uu____0[2U] = 0x4dU;
+      uint8_t *uu____1 = suite_id_kem + 3U;
+      uu____1[0U] = 0U;
+      uu____1[1U] = 16U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp1[len0];
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____2 = tmp1;
-      uu____2[0U] = (uint8_t)0x48U;
-      uu____2[1U] = (uint8_t)0x50U;
-      uu____2[2U] = (uint8_t)0x4bU;
-      uu____2[3U] = (uint8_t)0x45U;
-      uu____2[4U] = (uint8_t)0x2dU;
-      uu____2[5U] = (uint8_t)0x76U;
-      uu____2[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+      uu____2[0U] = 0x48U;
+      uu____2[1U] = 0x50U;
+      uu____2[2U] = 0x4bU;
+      uu____2[3U] = 0x45U;
+      uu____2[4U] = 0x2dU;
+      uu____2[5U] = 0x76U;
+      uu____2[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp1 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)157U;
+      uint32_t len = 157U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____3 = tmp + (uint32_t)2U;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)130U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res3 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____3 = tmp + 2U;
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 130U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res3 = 0U;
     }
     else
     {
-      res3 = (uint32_t)1U;
+      res3 = 1U;
     }
   }
   else
   {
-    res3 = (uint32_t)1U;
+    res3 = 1U;
   }
-  if (res3 == (uint32_t)0U)
+  if (res3 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____4 = suite_id;
-    uu____4[0U] = (uint8_t)0x48U;
-    uu____4[1U] = (uint8_t)0x50U;
-    uu____4[2U] = (uint8_t)0x4bU;
-    uu____4[3U] = (uint8_t)0x45U;
-    uint8_t *uu____5 = suite_id + (uint32_t)4U;
-    uu____5[0U] = (uint8_t)0U;
-    uu____5[1U] = (uint8_t)16U;
-    uint8_t *uu____6 = suite_id + (uint32_t)6U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)1U;
-    uint8_t *uu____7 = suite_id + (uint32_t)8U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
+    uu____4[0U] = 0x48U;
+    uu____4[1U] = 0x50U;
+    uu____4[2U] = 0x4bU;
+    uu____4[3U] = 0x45U;
+    uint8_t *uu____5 = suite_id + 4U;
+    uu____5[0U] = 0U;
+    uu____5[1U] = 16U;
+    uint8_t *uu____6 = suite_id + 6U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 1U;
+    uint8_t *uu____7 = suite_id + 8U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____8 = tmp0;
-    uu____8[0U] = (uint8_t)0x48U;
-    uu____8[1U] = (uint8_t)0x50U;
-    uu____8[2U] = (uint8_t)0x4bU;
-    uu____8[3U] = (uint8_t)0x45U;
-    uu____8[4U] = (uint8_t)0x2dU;
-    uu____8[5U] = (uint8_t)0x76U;
-    uu____8[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____8[0U] = 0x48U;
+    uu____8[1U] = 0x50U;
+    uu____8[2U] = 0x4bU;
+    uu____8[3U] = 0x45U;
+    uu____8[4U] = 0x2dU;
+    uu____8[5U] = 0x76U;
+    uu____8[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp1;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp2;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____11 = tmp3 + (uint32_t)2U;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____11 = tmp3 + 2U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp4 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____12 = tmp4 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____13 = tmp + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____13 = tmp + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res3;
   }
   return res3;
@@ -318,279 +290,252 @@ Hacl_HPKE_P256_CP128_SHA256_setupBaseR(
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    uint8_t *pkE = enc + (uint32_t)1U;
+    uint8_t *pkE = enc + 1U;
     uint8_t dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkE, skR);
-    memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res11;
     if (res)
     {
-      res11 = (uint32_t)0U;
+      res11 = 0U;
     }
     else
     {
-      res11 = (uint32_t)1U;
+      res11 = 1U;
     }
     uint32_t res20;
     uint8_t kemcontext[130U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)65U;
-      uint8_t *pkR1 = pkRm + (uint32_t)1U;
+      uint8_t *pkRm = kemcontext + 65U;
+      uint8_t *pkR1 = pkRm + 1U;
       bool res3 = Hacl_Impl_P256_DH_ecp256dh_i(pkR1, skR);
       uint32_t res2;
       if (res3)
       {
-        res2 = (uint32_t)0U;
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
-      if (res2 == (uint32_t)0U)
+      if (res2 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t));
-        pkRm[0U] = (uint8_t)4U;
+        memcpy(kemcontext, enc, 65U * sizeof (uint8_t));
+        pkRm[0U] = 4U;
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____0 = suite_id_kem;
-        uu____0[0U] = (uint8_t)0x4bU;
-        uu____0[1U] = (uint8_t)0x45U;
-        uu____0[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-        uu____1[0U] = (uint8_t)0U;
-        uu____1[1U] = (uint8_t)16U;
+        uu____0[0U] = 0x4bU;
+        uu____0[1U] = 0x45U;
+        uu____0[2U] = 0x4dU;
+        uint8_t *uu____1 = suite_id_kem + 3U;
+        uu____1[0U] = 0U;
+        uu____1[1U] = 16U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp1[len0];
         memset(tmp1, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____2 = tmp1;
-        uu____2[0U] = (uint8_t)0x48U;
-        uu____2[1U] = (uint8_t)0x50U;
-        uu____2[2U] = (uint8_t)0x4bU;
-        uu____2[3U] = (uint8_t)0x45U;
-        uu____2[4U] = (uint8_t)0x2dU;
-        uu____2[5U] = (uint8_t)0x76U;
-        uu____2[6U] = (uint8_t)0x31U;
-        memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+        uu____2[0U] = 0x48U;
+        uu____2[1U] = 0x50U;
+        uu____2[2U] = 0x4bU;
+        uu____2[3U] = 0x45U;
+        uu____2[4U] = 0x2dU;
+        uu____2[5U] = 0x76U;
+        uu____2[6U] = 0x31U;
+        memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp1 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)157U;
+        uint32_t len = 157U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____3 = tmp + (uint32_t)2U;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)130U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res20 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____3 = tmp + 2U;
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 130U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res20 = 0U;
       }
       else
       {
-        res20 = (uint32_t)1U;
+        res20 = 1U;
       }
     }
     else
     {
-      res20 = (uint32_t)1U;
+      res20 = 1U;
     }
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____4 = suite_id;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uint8_t *uu____5 = suite_id + (uint32_t)4U;
-      uu____5[0U] = (uint8_t)0U;
-      uu____5[1U] = (uint8_t)16U;
-      uint8_t *uu____6 = suite_id + (uint32_t)6U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)1U;
-      uint8_t *uu____7 = suite_id + (uint32_t)8U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uint8_t *uu____5 = suite_id + 4U;
+      uu____5[0U] = 0U;
+      uu____5[1U] = 16U;
+      uint8_t *uu____6 = suite_id + 6U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 1U;
+      uint8_t *uu____7 = suite_id + 8U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp1[len0];
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____8 = tmp1;
-      uu____8[0U] = (uint8_t)0x48U;
-      uu____8[1U] = (uint8_t)0x50U;
-      uu____8[2U] = (uint8_t)0x4bU;
-      uu____8[3U] = (uint8_t)0x45U;
-      uu____8[4U] = (uint8_t)0x2dU;
-      uu____8[5U] = (uint8_t)0x76U;
-      uu____8[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp1, len0);
+      uu____8[0U] = 0x48U;
+      uu____8[1U] = 0x50U;
+      uu____8[2U] = 0x4bU;
+      uu____8[3U] = 0x45U;
+      uu____8[4U] = 0x2dU;
+      uu____8[5U] = 0x76U;
+      uu____8[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp1 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp1, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp2[len1];
       memset(tmp2, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp2;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp2, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp2 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp2, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp3[len2];
       memset(tmp3, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp3;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp3, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp3 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp3 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp3, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp4[len3];
       memset(tmp4, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____11 = tmp4 + (uint32_t)2U;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp4,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____11 = tmp4 + 2U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp4, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp5[len4];
       memset(tmp5, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp5, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp5 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp5 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp5, len4, (uint32_t)32U);
+      store16_be(tmp5, (uint16_t)32U);
+      uint8_t *uu____12 = tmp5 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp5 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp5 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp5 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp5, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____13 = tmp + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____13 = tmp + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -609,7 +554,7 @@ Hacl_HPKE_P256_CP128_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -620,19 +565,19 @@ Hacl_HPKE_P256_CP128_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -643,20 +588,20 @@ Hacl_HPKE_P256_CP128_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -674,7 +619,7 @@ Hacl_HPKE_P256_CP128_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -685,42 +630,42 @@ Hacl_HPKE_P256_CP128_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_P256_CP256_SHA256.c b/src/Hacl_HPKE_P256_CP256_SHA256.c
index 3603cb42..88a05c6f 100644
--- a/src/Hacl_HPKE_P256_CP256_SHA256.c
+++ b/src/Hacl_HPKE_P256_CP256_SHA256.c
@@ -38,267 +38,239 @@ Hacl_HPKE_P256_CP256_SHA256_setupBaseS(
 )
 {
   uint8_t o_shared[32U] = { 0U };
-  uint8_t *o_pkE1 = o_pkE + (uint32_t)1U;
+  uint8_t *o_pkE1 = o_pkE + 1U;
   bool res0 = Hacl_Impl_P256_DH_ecp256dh_i(o_pkE1, skE);
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint32_t res3;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    o_pkE[0U] = (uint8_t)4U;
+    o_pkE[0U] = 4U;
     uint8_t o_dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkR, skE);
-    memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(o_dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res2;
     if (res)
     {
-      res2 = (uint32_t)0U;
+      res2 = 0U;
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
     uint8_t o_kemcontext[130U] = { 0U };
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U;
-      uint8_t *o_pkR = o_pkRm + (uint32_t)1U;
-      memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t));
-      o_pkRm[0U] = (uint8_t)4U;
+      memcpy(o_kemcontext, o_pkE, 65U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 65U;
+      uint8_t *o_pkR = o_pkRm + 1U;
+      memcpy(o_pkR, pkR, 64U * sizeof (uint8_t));
+      o_pkRm[0U] = 4U;
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____0 = suite_id_kem;
-      uu____0[0U] = (uint8_t)0x4bU;
-      uu____0[1U] = (uint8_t)0x45U;
-      uu____0[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-      uu____1[0U] = (uint8_t)0U;
-      uu____1[1U] = (uint8_t)16U;
+      uu____0[0U] = 0x4bU;
+      uu____0[1U] = 0x45U;
+      uu____0[2U] = 0x4dU;
+      uint8_t *uu____1 = suite_id_kem + 3U;
+      uu____1[0U] = 0U;
+      uu____1[1U] = 16U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp1[len0];
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____2 = tmp1;
-      uu____2[0U] = (uint8_t)0x48U;
-      uu____2[1U] = (uint8_t)0x50U;
-      uu____2[2U] = (uint8_t)0x4bU;
-      uu____2[3U] = (uint8_t)0x45U;
-      uu____2[4U] = (uint8_t)0x2dU;
-      uu____2[5U] = (uint8_t)0x76U;
-      uu____2[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+      uu____2[0U] = 0x48U;
+      uu____2[1U] = 0x50U;
+      uu____2[2U] = 0x4bU;
+      uu____2[3U] = 0x45U;
+      uu____2[4U] = 0x2dU;
+      uu____2[5U] = 0x76U;
+      uu____2[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp1 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)157U;
+      uint32_t len = 157U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____3 = tmp + (uint32_t)2U;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)130U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res3 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____3 = tmp + 2U;
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 130U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res3 = 0U;
     }
     else
     {
-      res3 = (uint32_t)1U;
+      res3 = 1U;
     }
   }
   else
   {
-    res3 = (uint32_t)1U;
+    res3 = 1U;
   }
-  if (res3 == (uint32_t)0U)
+  if (res3 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____4 = suite_id;
-    uu____4[0U] = (uint8_t)0x48U;
-    uu____4[1U] = (uint8_t)0x50U;
-    uu____4[2U] = (uint8_t)0x4bU;
-    uu____4[3U] = (uint8_t)0x45U;
-    uint8_t *uu____5 = suite_id + (uint32_t)4U;
-    uu____5[0U] = (uint8_t)0U;
-    uu____5[1U] = (uint8_t)16U;
-    uint8_t *uu____6 = suite_id + (uint32_t)6U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)1U;
-    uint8_t *uu____7 = suite_id + (uint32_t)8U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
+    uu____4[0U] = 0x48U;
+    uu____4[1U] = 0x50U;
+    uu____4[2U] = 0x4bU;
+    uu____4[3U] = 0x45U;
+    uint8_t *uu____5 = suite_id + 4U;
+    uu____5[0U] = 0U;
+    uu____5[1U] = 16U;
+    uint8_t *uu____6 = suite_id + 6U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 1U;
+    uint8_t *uu____7 = suite_id + 8U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____8 = tmp0;
-    uu____8[0U] = (uint8_t)0x48U;
-    uu____8[1U] = (uint8_t)0x50U;
-    uu____8[2U] = (uint8_t)0x4bU;
-    uu____8[3U] = (uint8_t)0x45U;
-    uu____8[4U] = (uint8_t)0x2dU;
-    uu____8[5U] = (uint8_t)0x76U;
-    uu____8[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____8[0U] = 0x48U;
+    uu____8[1U] = 0x50U;
+    uu____8[2U] = 0x4bU;
+    uu____8[3U] = 0x45U;
+    uu____8[4U] = 0x2dU;
+    uu____8[5U] = 0x76U;
+    uu____8[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp1;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp2;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____11 = tmp3 + (uint32_t)2U;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____11 = tmp3 + 2U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp4 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____12 = tmp4 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____13 = tmp + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____13 = tmp + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res3;
   }
   return res3;
@@ -318,279 +290,252 @@ Hacl_HPKE_P256_CP256_SHA256_setupBaseR(
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    uint8_t *pkE = enc + (uint32_t)1U;
+    uint8_t *pkE = enc + 1U;
     uint8_t dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkE, skR);
-    memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res11;
     if (res)
     {
-      res11 = (uint32_t)0U;
+      res11 = 0U;
     }
     else
     {
-      res11 = (uint32_t)1U;
+      res11 = 1U;
     }
     uint32_t res20;
     uint8_t kemcontext[130U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)65U;
-      uint8_t *pkR1 = pkRm + (uint32_t)1U;
+      uint8_t *pkRm = kemcontext + 65U;
+      uint8_t *pkR1 = pkRm + 1U;
       bool res3 = Hacl_Impl_P256_DH_ecp256dh_i(pkR1, skR);
       uint32_t res2;
       if (res3)
       {
-        res2 = (uint32_t)0U;
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
-      if (res2 == (uint32_t)0U)
+      if (res2 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t));
-        pkRm[0U] = (uint8_t)4U;
+        memcpy(kemcontext, enc, 65U * sizeof (uint8_t));
+        pkRm[0U] = 4U;
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____0 = suite_id_kem;
-        uu____0[0U] = (uint8_t)0x4bU;
-        uu____0[1U] = (uint8_t)0x45U;
-        uu____0[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-        uu____1[0U] = (uint8_t)0U;
-        uu____1[1U] = (uint8_t)16U;
+        uu____0[0U] = 0x4bU;
+        uu____0[1U] = 0x45U;
+        uu____0[2U] = 0x4dU;
+        uint8_t *uu____1 = suite_id_kem + 3U;
+        uu____1[0U] = 0U;
+        uu____1[1U] = 16U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp1[len0];
         memset(tmp1, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____2 = tmp1;
-        uu____2[0U] = (uint8_t)0x48U;
-        uu____2[1U] = (uint8_t)0x50U;
-        uu____2[2U] = (uint8_t)0x4bU;
-        uu____2[3U] = (uint8_t)0x45U;
-        uu____2[4U] = (uint8_t)0x2dU;
-        uu____2[5U] = (uint8_t)0x76U;
-        uu____2[6U] = (uint8_t)0x31U;
-        memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+        uu____2[0U] = 0x48U;
+        uu____2[1U] = 0x50U;
+        uu____2[2U] = 0x4bU;
+        uu____2[3U] = 0x45U;
+        uu____2[4U] = 0x2dU;
+        uu____2[5U] = 0x76U;
+        uu____2[6U] = 0x31U;
+        memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp1 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)157U;
+        uint32_t len = 157U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____3 = tmp + (uint32_t)2U;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)130U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res20 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____3 = tmp + 2U;
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 130U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res20 = 0U;
       }
       else
       {
-        res20 = (uint32_t)1U;
+        res20 = 1U;
       }
     }
     else
     {
-      res20 = (uint32_t)1U;
+      res20 = 1U;
     }
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____4 = suite_id;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uint8_t *uu____5 = suite_id + (uint32_t)4U;
-      uu____5[0U] = (uint8_t)0U;
-      uu____5[1U] = (uint8_t)16U;
-      uint8_t *uu____6 = suite_id + (uint32_t)6U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)1U;
-      uint8_t *uu____7 = suite_id + (uint32_t)8U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uint8_t *uu____5 = suite_id + 4U;
+      uu____5[0U] = 0U;
+      uu____5[1U] = 16U;
+      uint8_t *uu____6 = suite_id + 6U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 1U;
+      uint8_t *uu____7 = suite_id + 8U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp1[len0];
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____8 = tmp1;
-      uu____8[0U] = (uint8_t)0x48U;
-      uu____8[1U] = (uint8_t)0x50U;
-      uu____8[2U] = (uint8_t)0x4bU;
-      uu____8[3U] = (uint8_t)0x45U;
-      uu____8[4U] = (uint8_t)0x2dU;
-      uu____8[5U] = (uint8_t)0x76U;
-      uu____8[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp1, len0);
+      uu____8[0U] = 0x48U;
+      uu____8[1U] = 0x50U;
+      uu____8[2U] = 0x4bU;
+      uu____8[3U] = 0x45U;
+      uu____8[4U] = 0x2dU;
+      uu____8[5U] = 0x76U;
+      uu____8[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp1 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp1, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp2[len1];
       memset(tmp2, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp2;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp2, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp2 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp2, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp3[len2];
       memset(tmp3, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp3;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp3, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp3 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp3 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp3, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp4[len3];
       memset(tmp4, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____11 = tmp4 + (uint32_t)2U;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp4,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____11 = tmp4 + 2U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp4, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp5[len4];
       memset(tmp5, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp5, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp5 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp5 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp5, len4, (uint32_t)32U);
+      store16_be(tmp5, (uint16_t)32U);
+      uint8_t *uu____12 = tmp5 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp5 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp5 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp5 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp5, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____13 = tmp + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____13 = tmp + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -609,7 +554,7 @@ Hacl_HPKE_P256_CP256_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -620,19 +565,19 @@ Hacl_HPKE_P256_CP256_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -643,20 +588,20 @@ Hacl_HPKE_P256_CP256_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -674,7 +619,7 @@ Hacl_HPKE_P256_CP256_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -685,42 +630,42 @@ Hacl_HPKE_P256_CP256_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_P256_CP32_SHA256.c b/src/Hacl_HPKE_P256_CP32_SHA256.c
index 5297dd2c..b6d2df3f 100644
--- a/src/Hacl_HPKE_P256_CP32_SHA256.c
+++ b/src/Hacl_HPKE_P256_CP32_SHA256.c
@@ -38,267 +38,239 @@ Hacl_HPKE_P256_CP32_SHA256_setupBaseS(
 )
 {
   uint8_t o_shared[32U] = { 0U };
-  uint8_t *o_pkE1 = o_pkE + (uint32_t)1U;
+  uint8_t *o_pkE1 = o_pkE + 1U;
   bool res0 = Hacl_Impl_P256_DH_ecp256dh_i(o_pkE1, skE);
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint32_t res3;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    o_pkE[0U] = (uint8_t)4U;
+    o_pkE[0U] = 4U;
     uint8_t o_dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkR, skE);
-    memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(o_dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res2;
     if (res)
     {
-      res2 = (uint32_t)0U;
+      res2 = 0U;
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
     uint8_t o_kemcontext[130U] = { 0U };
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U;
-      uint8_t *o_pkR = o_pkRm + (uint32_t)1U;
-      memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t));
-      o_pkRm[0U] = (uint8_t)4U;
+      memcpy(o_kemcontext, o_pkE, 65U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 65U;
+      uint8_t *o_pkR = o_pkRm + 1U;
+      memcpy(o_pkR, pkR, 64U * sizeof (uint8_t));
+      o_pkRm[0U] = 4U;
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____0 = suite_id_kem;
-      uu____0[0U] = (uint8_t)0x4bU;
-      uu____0[1U] = (uint8_t)0x45U;
-      uu____0[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-      uu____1[0U] = (uint8_t)0U;
-      uu____1[1U] = (uint8_t)16U;
+      uu____0[0U] = 0x4bU;
+      uu____0[1U] = 0x45U;
+      uu____0[2U] = 0x4dU;
+      uint8_t *uu____1 = suite_id_kem + 3U;
+      uu____1[0U] = 0U;
+      uu____1[1U] = 16U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp1[len0];
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____2 = tmp1;
-      uu____2[0U] = (uint8_t)0x48U;
-      uu____2[1U] = (uint8_t)0x50U;
-      uu____2[2U] = (uint8_t)0x4bU;
-      uu____2[3U] = (uint8_t)0x45U;
-      uu____2[4U] = (uint8_t)0x2dU;
-      uu____2[5U] = (uint8_t)0x76U;
-      uu____2[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+      uu____2[0U] = 0x48U;
+      uu____2[1U] = 0x50U;
+      uu____2[2U] = 0x4bU;
+      uu____2[3U] = 0x45U;
+      uu____2[4U] = 0x2dU;
+      uu____2[5U] = 0x76U;
+      uu____2[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp1 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)157U;
+      uint32_t len = 157U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____3 = tmp + (uint32_t)2U;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)130U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res3 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____3 = tmp + 2U;
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 130U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res3 = 0U;
     }
     else
     {
-      res3 = (uint32_t)1U;
+      res3 = 1U;
     }
   }
   else
   {
-    res3 = (uint32_t)1U;
+    res3 = 1U;
   }
-  if (res3 == (uint32_t)0U)
+  if (res3 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____4 = suite_id;
-    uu____4[0U] = (uint8_t)0x48U;
-    uu____4[1U] = (uint8_t)0x50U;
-    uu____4[2U] = (uint8_t)0x4bU;
-    uu____4[3U] = (uint8_t)0x45U;
-    uint8_t *uu____5 = suite_id + (uint32_t)4U;
-    uu____5[0U] = (uint8_t)0U;
-    uu____5[1U] = (uint8_t)16U;
-    uint8_t *uu____6 = suite_id + (uint32_t)6U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)1U;
-    uint8_t *uu____7 = suite_id + (uint32_t)8U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
+    uu____4[0U] = 0x48U;
+    uu____4[1U] = 0x50U;
+    uu____4[2U] = 0x4bU;
+    uu____4[3U] = 0x45U;
+    uint8_t *uu____5 = suite_id + 4U;
+    uu____5[0U] = 0U;
+    uu____5[1U] = 16U;
+    uint8_t *uu____6 = suite_id + 6U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 1U;
+    uint8_t *uu____7 = suite_id + 8U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____8 = tmp0;
-    uu____8[0U] = (uint8_t)0x48U;
-    uu____8[1U] = (uint8_t)0x50U;
-    uu____8[2U] = (uint8_t)0x4bU;
-    uu____8[3U] = (uint8_t)0x45U;
-    uu____8[4U] = (uint8_t)0x2dU;
-    uu____8[5U] = (uint8_t)0x76U;
-    uu____8[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____8[0U] = 0x48U;
+    uu____8[1U] = 0x50U;
+    uu____8[2U] = 0x4bU;
+    uu____8[3U] = 0x45U;
+    uu____8[4U] = 0x2dU;
+    uu____8[5U] = 0x76U;
+    uu____8[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp1;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp2;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____11 = tmp3 + (uint32_t)2U;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____11 = tmp3 + 2U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp4 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____12 = tmp4 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____13 = tmp + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____13 = tmp + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res3;
   }
   return res3;
@@ -318,279 +290,252 @@ Hacl_HPKE_P256_CP32_SHA256_setupBaseR(
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    uint8_t *pkE = enc + (uint32_t)1U;
+    uint8_t *pkE = enc + 1U;
     uint8_t dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkE, skR);
-    memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res11;
     if (res)
     {
-      res11 = (uint32_t)0U;
+      res11 = 0U;
     }
     else
     {
-      res11 = (uint32_t)1U;
+      res11 = 1U;
     }
     uint32_t res20;
     uint8_t kemcontext[130U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)65U;
-      uint8_t *pkR1 = pkRm + (uint32_t)1U;
+      uint8_t *pkRm = kemcontext + 65U;
+      uint8_t *pkR1 = pkRm + 1U;
       bool res3 = Hacl_Impl_P256_DH_ecp256dh_i(pkR1, skR);
       uint32_t res2;
       if (res3)
       {
-        res2 = (uint32_t)0U;
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
-      if (res2 == (uint32_t)0U)
+      if (res2 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t));
-        pkRm[0U] = (uint8_t)4U;
+        memcpy(kemcontext, enc, 65U * sizeof (uint8_t));
+        pkRm[0U] = 4U;
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____0 = suite_id_kem;
-        uu____0[0U] = (uint8_t)0x4bU;
-        uu____0[1U] = (uint8_t)0x45U;
-        uu____0[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-        uu____1[0U] = (uint8_t)0U;
-        uu____1[1U] = (uint8_t)16U;
+        uu____0[0U] = 0x4bU;
+        uu____0[1U] = 0x45U;
+        uu____0[2U] = 0x4dU;
+        uint8_t *uu____1 = suite_id_kem + 3U;
+        uu____1[0U] = 0U;
+        uu____1[1U] = 16U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp1[len0];
         memset(tmp1, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____2 = tmp1;
-        uu____2[0U] = (uint8_t)0x48U;
-        uu____2[1U] = (uint8_t)0x50U;
-        uu____2[2U] = (uint8_t)0x4bU;
-        uu____2[3U] = (uint8_t)0x45U;
-        uu____2[4U] = (uint8_t)0x2dU;
-        uu____2[5U] = (uint8_t)0x76U;
-        uu____2[6U] = (uint8_t)0x31U;
-        memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+        uu____2[0U] = 0x48U;
+        uu____2[1U] = 0x50U;
+        uu____2[2U] = 0x4bU;
+        uu____2[3U] = 0x45U;
+        uu____2[4U] = 0x2dU;
+        uu____2[5U] = 0x76U;
+        uu____2[6U] = 0x31U;
+        memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp1 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)157U;
+        uint32_t len = 157U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____3 = tmp + (uint32_t)2U;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)130U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res20 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____3 = tmp + 2U;
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 130U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res20 = 0U;
       }
       else
       {
-        res20 = (uint32_t)1U;
+        res20 = 1U;
       }
     }
     else
     {
-      res20 = (uint32_t)1U;
+      res20 = 1U;
     }
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____4 = suite_id;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uint8_t *uu____5 = suite_id + (uint32_t)4U;
-      uu____5[0U] = (uint8_t)0U;
-      uu____5[1U] = (uint8_t)16U;
-      uint8_t *uu____6 = suite_id + (uint32_t)6U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)1U;
-      uint8_t *uu____7 = suite_id + (uint32_t)8U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uint8_t *uu____5 = suite_id + 4U;
+      uu____5[0U] = 0U;
+      uu____5[1U] = 16U;
+      uint8_t *uu____6 = suite_id + 6U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 1U;
+      uint8_t *uu____7 = suite_id + 8U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp1[len0];
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____8 = tmp1;
-      uu____8[0U] = (uint8_t)0x48U;
-      uu____8[1U] = (uint8_t)0x50U;
-      uu____8[2U] = (uint8_t)0x4bU;
-      uu____8[3U] = (uint8_t)0x45U;
-      uu____8[4U] = (uint8_t)0x2dU;
-      uu____8[5U] = (uint8_t)0x76U;
-      uu____8[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp1, len0);
+      uu____8[0U] = 0x48U;
+      uu____8[1U] = 0x50U;
+      uu____8[2U] = 0x4bU;
+      uu____8[3U] = 0x45U;
+      uu____8[4U] = 0x2dU;
+      uu____8[5U] = 0x76U;
+      uu____8[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp1 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp1, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp2[len1];
       memset(tmp2, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp2;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp2, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp2 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp2, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp3[len2];
       memset(tmp3, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp3;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp3, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp3 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp3 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp3, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp4[len3];
       memset(tmp4, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____11 = tmp4 + (uint32_t)2U;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp4,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____11 = tmp4 + 2U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp4, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp5[len4];
       memset(tmp5, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp5, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp5 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp5 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp5, len4, (uint32_t)32U);
+      store16_be(tmp5, (uint16_t)32U);
+      uint8_t *uu____12 = tmp5 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp5 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp5 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp5 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp5, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____13 = tmp + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____13 = tmp + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -609,7 +554,7 @@ Hacl_HPKE_P256_CP32_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -620,19 +565,19 @@ Hacl_HPKE_P256_CP32_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -643,20 +588,20 @@ Hacl_HPKE_P256_CP32_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -674,7 +619,7 @@ Hacl_HPKE_P256_CP32_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -685,42 +630,42 @@ Hacl_HPKE_P256_CP32_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_Hash_Base.c b/src/Hacl_Hash_Base.c
index 40796f14..02d893e3 100644
--- a/src/Hacl_Hash_Base.c
+++ b/src/Hacl_Hash_Base.c
@@ -31,27 +31,27 @@ uint32_t Hacl_Hash_Definitions_word_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     default:
       {
@@ -67,59 +67,59 @@ uint32_t Hacl_Hash_Definitions_block_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)144U;
+        return 144U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)104U;
+        return 104U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)72U;
+        return 72U;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        return (uint32_t)168U;
+        return 168U;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     default:
       {
@@ -135,27 +135,27 @@ uint32_t Hacl_Hash_Definitions_hash_word_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)5U;
+        return 5U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)7U;
+        return 7U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)6U;
+        return 6U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     default:
       {
@@ -171,51 +171,51 @@ uint32_t Hacl_Hash_Definitions_hash_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)20U;
+        return 20U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     default:
       {
diff --git a/src/Hacl_Hash_Blake2.c b/src/Hacl_Hash_Blake2.c
index aecc6165..44c2a29f 100644
--- a/src/Hacl_Hash_Blake2.c
+++ b/src/Hacl_Hash_Blake2.c
@@ -39,11 +39,11 @@ blake2b_update_block(
 {
   uint64_t m_w[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint64_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)8U;
+    uint8_t *bj = d + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -52,52 +52,52 @@ blake2b_update_block(
   uint64_t wv_14;
   if (flag)
   {
-    wv_14 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    wv_14 = 0xFFFFFFFFFFFFFFFFULL;
   }
   else
   {
-    wv_14 = (uint64_t)0U;
+    wv_14 = 0ULL;
   }
-  uint64_t wv_15 = (uint64_t)0U;
+  uint64_t wv_15 = 0ULL;
   mask[0U] = FStar_UInt128_uint128_to_uint64(totlen);
-  mask[1U] = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, (uint32_t)64U));
+  mask[1U] = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, 64U));
   mask[2U] = wv_14;
   mask[3U] = wv_15;
-  memcpy(wv, hash, (uint32_t)16U * sizeof (uint64_t));
-  uint64_t *wv3 = wv + (uint32_t)12U;
+  memcpy(wv, hash, 16U * sizeof (uint64_t));
+  uint64_t *wv3 = wv + 12U;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = wv3;
     uint64_t x = wv3[i] ^ mask[i];
     os[i] = x;);
   KRML_MAYBE_FOR12(i0,
-    (uint32_t)0U,
-    (uint32_t)12U,
-    (uint32_t)1U,
-    uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U;
+    0U,
+    12U,
+    1U,
+    uint32_t start_idx = i0 % 10U * 16U;
     uint64_t m_st[16U] = { 0U };
     uint64_t *r0 = m_st;
-    uint64_t *r1 = m_st + (uint32_t)4U;
-    uint64_t *r20 = m_st + (uint32_t)8U;
-    uint64_t *r30 = m_st + (uint32_t)12U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
+    uint64_t *r1 = m_st + 4U;
+    uint64_t *r20 = m_st + 8U;
+    uint64_t *r30 = m_st + 12U;
+    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 15U];
     uint64_t uu____0 = m_w[s2];
     uint64_t uu____1 = m_w[s4];
     uint64_t uu____2 = m_w[s6];
@@ -127,138 +127,138 @@ blake2b_update_block(
     r30[2U] = uu____10;
     r30[3U] = uu____11;
     uint64_t *x = m_st;
-    uint64_t *y = m_st + (uint32_t)4U;
-    uint64_t *z = m_st + (uint32_t)8U;
-    uint64_t *w = m_st + (uint32_t)12U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    uint64_t *wv_a0 = wv + a * (uint32_t)4U;
-    uint64_t *wv_b0 = wv + b0 * (uint32_t)4U;
+    uint64_t *y = m_st + 4U;
+    uint64_t *z = m_st + 8U;
+    uint64_t *w = m_st + 12U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    uint64_t *wv_a0 = wv + a * 4U;
+    uint64_t *wv_b0 = wv + b0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a0;
       uint64_t x1 = wv_a0[i] + wv_b0[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a0;
       uint64_t x1 = wv_a0[i] + x[i];
       os[i] = x1;);
-    uint64_t *wv_a1 = wv + d10 * (uint32_t)4U;
-    uint64_t *wv_b1 = wv + a * (uint32_t)4U;
+    uint64_t *wv_a1 = wv + d10 * 4U;
+    uint64_t *wv_b1 = wv + a * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a1;
       uint64_t x1 = wv_a1[i] ^ wv_b1[i];
       os[i] = x1;);
     uint64_t *r10 = wv_a1;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r10;
       uint64_t x1 = r10[i];
-      uint64_t x10 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U;
+      uint64_t x10 = x1 >> 32U | x1 << 32U;
       os[i] = x10;);
-    uint64_t *wv_a2 = wv + c0 * (uint32_t)4U;
-    uint64_t *wv_b2 = wv + d10 * (uint32_t)4U;
+    uint64_t *wv_a2 = wv + c0 * 4U;
+    uint64_t *wv_b2 = wv + d10 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a2;
       uint64_t x1 = wv_a2[i] + wv_b2[i];
       os[i] = x1;);
-    uint64_t *wv_a3 = wv + b0 * (uint32_t)4U;
-    uint64_t *wv_b3 = wv + c0 * (uint32_t)4U;
+    uint64_t *wv_a3 = wv + b0 * 4U;
+    uint64_t *wv_b3 = wv + c0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a3;
       uint64_t x1 = wv_a3[i] ^ wv_b3[i];
       os[i] = x1;);
     uint64_t *r12 = wv_a3;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r12;
       uint64_t x1 = r12[i];
-      uint64_t x10 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U;
+      uint64_t x10 = x1 >> 24U | x1 << 40U;
       os[i] = x10;);
-    uint64_t *wv_a4 = wv + a * (uint32_t)4U;
-    uint64_t *wv_b4 = wv + b0 * (uint32_t)4U;
+    uint64_t *wv_a4 = wv + a * 4U;
+    uint64_t *wv_b4 = wv + b0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a4;
       uint64_t x1 = wv_a4[i] + wv_b4[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a4;
       uint64_t x1 = wv_a4[i] + y[i];
       os[i] = x1;);
-    uint64_t *wv_a5 = wv + d10 * (uint32_t)4U;
-    uint64_t *wv_b5 = wv + a * (uint32_t)4U;
+    uint64_t *wv_a5 = wv + d10 * 4U;
+    uint64_t *wv_b5 = wv + a * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a5;
       uint64_t x1 = wv_a5[i] ^ wv_b5[i];
       os[i] = x1;);
     uint64_t *r13 = wv_a5;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r13;
       uint64_t x1 = r13[i];
-      uint64_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U;
+      uint64_t x10 = x1 >> 16U | x1 << 48U;
       os[i] = x10;);
-    uint64_t *wv_a6 = wv + c0 * (uint32_t)4U;
-    uint64_t *wv_b6 = wv + d10 * (uint32_t)4U;
+    uint64_t *wv_a6 = wv + c0 * 4U;
+    uint64_t *wv_b6 = wv + d10 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a6;
       uint64_t x1 = wv_a6[i] + wv_b6[i];
       os[i] = x1;);
-    uint64_t *wv_a7 = wv + b0 * (uint32_t)4U;
-    uint64_t *wv_b7 = wv + c0 * (uint32_t)4U;
+    uint64_t *wv_a7 = wv + b0 * 4U;
+    uint64_t *wv_b7 = wv + c0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a7;
       uint64_t x1 = wv_a7[i] ^ wv_b7[i];
       os[i] = x1;);
     uint64_t *r14 = wv_a7;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r14;
       uint64_t x1 = r14[i];
-      uint64_t x10 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U;
+      uint64_t x10 = x1 >> 63U | x1 << 1U;
       os[i] = x10;);
-    uint64_t *r15 = wv + (uint32_t)4U;
-    uint64_t *r21 = wv + (uint32_t)8U;
-    uint64_t *r31 = wv + (uint32_t)12U;
+    uint64_t *r15 = wv + 4U;
+    uint64_t *r21 = wv + 8U;
+    uint64_t *r31 = wv + 12U;
     uint64_t *r110 = r15;
     uint64_t x00 = r110[1U];
     uint64_t x10 = r110[2U];
@@ -286,135 +286,135 @@ blake2b_update_block(
     r112[1U] = x12;
     r112[2U] = x22;
     r112[3U] = x32;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    uint64_t *wv_a = wv + a0 * (uint32_t)4U;
-    uint64_t *wv_b8 = wv + b * (uint32_t)4U;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    uint64_t *wv_a = wv + a0 * 4U;
+    uint64_t *wv_b8 = wv + b * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a;
       uint64_t x1 = wv_a[i] + wv_b8[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a;
       uint64_t x1 = wv_a[i] + z[i];
       os[i] = x1;);
-    uint64_t *wv_a8 = wv + d1 * (uint32_t)4U;
-    uint64_t *wv_b9 = wv + a0 * (uint32_t)4U;
+    uint64_t *wv_a8 = wv + d1 * 4U;
+    uint64_t *wv_b9 = wv + a0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a8;
       uint64_t x1 = wv_a8[i] ^ wv_b9[i];
       os[i] = x1;);
     uint64_t *r16 = wv_a8;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r16;
       uint64_t x1 = r16[i];
-      uint64_t x13 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U;
+      uint64_t x13 = x1 >> 32U | x1 << 32U;
       os[i] = x13;);
-    uint64_t *wv_a9 = wv + c * (uint32_t)4U;
-    uint64_t *wv_b10 = wv + d1 * (uint32_t)4U;
+    uint64_t *wv_a9 = wv + c * 4U;
+    uint64_t *wv_b10 = wv + d1 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a9;
       uint64_t x1 = wv_a9[i] + wv_b10[i];
       os[i] = x1;);
-    uint64_t *wv_a10 = wv + b * (uint32_t)4U;
-    uint64_t *wv_b11 = wv + c * (uint32_t)4U;
+    uint64_t *wv_a10 = wv + b * 4U;
+    uint64_t *wv_b11 = wv + c * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a10;
       uint64_t x1 = wv_a10[i] ^ wv_b11[i];
       os[i] = x1;);
     uint64_t *r17 = wv_a10;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r17;
       uint64_t x1 = r17[i];
-      uint64_t x13 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U;
+      uint64_t x13 = x1 >> 24U | x1 << 40U;
       os[i] = x13;);
-    uint64_t *wv_a11 = wv + a0 * (uint32_t)4U;
-    uint64_t *wv_b12 = wv + b * (uint32_t)4U;
+    uint64_t *wv_a11 = wv + a0 * 4U;
+    uint64_t *wv_b12 = wv + b * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a11;
       uint64_t x1 = wv_a11[i] + wv_b12[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a11;
       uint64_t x1 = wv_a11[i] + w[i];
       os[i] = x1;);
-    uint64_t *wv_a12 = wv + d1 * (uint32_t)4U;
-    uint64_t *wv_b13 = wv + a0 * (uint32_t)4U;
+    uint64_t *wv_a12 = wv + d1 * 4U;
+    uint64_t *wv_b13 = wv + a0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a12;
       uint64_t x1 = wv_a12[i] ^ wv_b13[i];
       os[i] = x1;);
     uint64_t *r18 = wv_a12;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r18;
       uint64_t x1 = r18[i];
-      uint64_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U;
+      uint64_t x13 = x1 >> 16U | x1 << 48U;
       os[i] = x13;);
-    uint64_t *wv_a13 = wv + c * (uint32_t)4U;
-    uint64_t *wv_b14 = wv + d1 * (uint32_t)4U;
+    uint64_t *wv_a13 = wv + c * 4U;
+    uint64_t *wv_b14 = wv + d1 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a13;
       uint64_t x1 = wv_a13[i] + wv_b14[i];
       os[i] = x1;);
-    uint64_t *wv_a14 = wv + b * (uint32_t)4U;
-    uint64_t *wv_b = wv + c * (uint32_t)4U;
+    uint64_t *wv_a14 = wv + b * 4U;
+    uint64_t *wv_b = wv + c * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a14;
       uint64_t x1 = wv_a14[i] ^ wv_b[i];
       os[i] = x1;);
     uint64_t *r19 = wv_a14;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r19;
       uint64_t x1 = r19[i];
-      uint64_t x13 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U;
+      uint64_t x13 = x1 >> 63U | x1 << 1U;
       os[i] = x13;);
-    uint64_t *r113 = wv + (uint32_t)4U;
-    uint64_t *r2 = wv + (uint32_t)8U;
-    uint64_t *r3 = wv + (uint32_t)12U;
+    uint64_t *r113 = wv + 4U;
+    uint64_t *r2 = wv + 8U;
+    uint64_t *r3 = wv + 12U;
     uint64_t *r11 = r113;
     uint64_t x03 = r11[3U];
     uint64_t x13 = r11[0U];
@@ -443,36 +443,36 @@ blake2b_update_block(
     r115[2U] = x2;
     r115[3U] = x3;);
   uint64_t *s0 = hash;
-  uint64_t *s1 = hash + (uint32_t)4U;
+  uint64_t *s1 = hash + 4U;
   uint64_t *r0 = wv;
-  uint64_t *r1 = wv + (uint32_t)4U;
-  uint64_t *r2 = wv + (uint32_t)8U;
-  uint64_t *r3 = wv + (uint32_t)12U;
+  uint64_t *r1 = wv + 4U;
+  uint64_t *r2 = wv + 8U;
+  uint64_t *r3 = wv + 12U;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = s0;
     uint64_t x = s0[i] ^ r0[i];
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = s0;
     uint64_t x = s0[i] ^ r2[i];
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = s1;
     uint64_t x = s1[i] ^ r1[i];
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = s1;
     uint64_t x = s1[i] ^ r3[i];
     os[i] = x;);
@@ -481,9 +481,9 @@ blake2b_update_block(
 void Hacl_Blake2b_32_blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn)
 {
   uint64_t *r0 = hash;
-  uint64_t *r1 = hash + (uint32_t)4U;
-  uint64_t *r2 = hash + (uint32_t)8U;
-  uint64_t *r3 = hash + (uint32_t)12U;
+  uint64_t *r1 = hash + 4U;
+  uint64_t *r2 = hash + 8U;
+  uint64_t *r3 = hash + 12U;
   uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U];
   uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U];
   uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U];
@@ -500,8 +500,8 @@ void Hacl_Blake2b_32_blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn)
   r3[1U] = iv5;
   r3[2U] = iv6;
   r3[3U] = iv7;
-  uint64_t kk_shift_8 = (uint64_t)kk << (uint32_t)8U;
-  uint64_t iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)nn));
+  uint64_t kk_shift_8 = (uint64_t)kk << 8U;
+  uint64_t iv0_ = iv0 ^ (0x01010000ULL ^ (kk_shift_8 ^ (uint64_t)nn));
   r0[0U] = iv0_;
   r0[1U] = iv1;
   r0[2U] = iv2;
@@ -521,10 +521,10 @@ Hacl_Blake2b_32_blake2b_update_key(
   uint32_t ll
 )
 {
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
   uint8_t b[128U] = { 0U };
   memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
+  if (ll == 0U)
   {
     blake2b_update_block(wv, hash, true, lb, b);
   }
@@ -532,7 +532,7 @@ Hacl_Blake2b_32_blake2b_update_key(
   {
     blake2b_update_block(wv, hash, false, lb, b);
   }
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
 }
 
 void
@@ -545,14 +545,14 @@ Hacl_Blake2b_32_blake2b_update_multi(
   uint32_t nb
 )
 {
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
   {
     FStar_UInt128_uint128
     totlen =
       FStar_UInt128_add_mod(prev,
-        FStar_UInt128_uint64_to_uint128((uint64_t)((i + (uint32_t)1U) * (uint32_t)128U)));
-    uint8_t *b = blocks + i * (uint32_t)128U;
+        FStar_UInt128_uint64_to_uint128((uint64_t)((i + 1U) * 128U)));
+    uint8_t *b = blocks + i * 128U;
     blake2b_update_block(wv, hash, false, totlen, b);
   }
 }
@@ -573,7 +573,7 @@ Hacl_Blake2b_32_blake2b_update_last(
   FStar_UInt128_uint128
   totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
   blake2b_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
 }
 
 static void
@@ -585,13 +585,13 @@ blake2b_update_blocks(
   uint8_t *blocks
 )
 {
-  uint32_t nb0 = len / (uint32_t)128U;
-  uint32_t rem0 = len % (uint32_t)128U;
+  uint32_t nb0 = len / 128U;
+  uint32_t rem0 = len % 128U;
   K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
+  if (rem0 == 0U && nb0 > 0U)
   {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)128U;
+    uint32_t nb_ = nb0 - 1U;
+    uint32_t rem_ = 128U;
     scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
   }
   else
@@ -607,44 +607,32 @@ blake2b_update_blocks(
 static inline void
 blake2b_update(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d)
 {
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
-  if (kk > (uint32_t)0U)
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
+  if (kk > 0U)
   {
     Hacl_Blake2b_32_blake2b_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
+    if (!(ll == 0U))
     {
       blake2b_update_blocks(ll, wv, hash, lb, d);
       return;
     }
     return;
   }
-  blake2b_update_blocks(ll,
-    wv,
-    hash,
-    FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)0U),
-    d);
+  blake2b_update_blocks(ll, wv, hash, FStar_UInt128_uint64_to_uint128((uint64_t)0U), d);
 }
 
 void Hacl_Blake2b_32_blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash)
 {
   uint8_t b[64U] = { 0U };
   uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)32U;
+  uint8_t *second = b + 32U;
   uint64_t *row0 = hash;
-  uint64_t *row1 = hash + (uint32_t)4U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(first + i * (uint32_t)8U, row0[i]););
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(second + i * (uint32_t)8U, row1[i]););
+  uint64_t *row1 = hash + 4U;
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(first + i * 8U, row0[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(second + i * 8U, row1[i]););
   uint8_t *final = b;
   memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
 }
 
 /**
@@ -672,13 +660,13 @@ Hacl_Blake2b_32_blake2b(
   Hacl_Blake2b_32_blake2b_init(b, kk, nn);
   blake2b_update(b1, b, kk, k, ll, d);
   Hacl_Blake2b_32_blake2b_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)16U, uint64_t);
-  Lib_Memzero0_memzero(b, (uint32_t)16U, uint64_t);
+  Lib_Memzero0_memzero(b1, 16U, uint64_t);
+  Lib_Memzero0_memzero(b, 16U, uint64_t);
 }
 
 uint64_t *Hacl_Blake2b_32_blake2b_malloc(void)
 {
-  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
+  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
   return buf;
 }
 
@@ -687,11 +675,11 @@ blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, u
 {
   uint32_t m_w[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)4U;
+    uint8_t *bj = d + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -700,52 +688,52 @@ blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, u
   uint32_t wv_14;
   if (flag)
   {
-    wv_14 = (uint32_t)0xFFFFFFFFU;
+    wv_14 = 0xFFFFFFFFU;
   }
   else
   {
-    wv_14 = (uint32_t)0U;
+    wv_14 = 0U;
   }
-  uint32_t wv_15 = (uint32_t)0U;
+  uint32_t wv_15 = 0U;
   mask[0U] = (uint32_t)totlen;
-  mask[1U] = (uint32_t)(totlen >> (uint32_t)32U);
+  mask[1U] = (uint32_t)(totlen >> 32U);
   mask[2U] = wv_14;
   mask[3U] = wv_15;
-  memcpy(wv, hash, (uint32_t)16U * sizeof (uint32_t));
-  uint32_t *wv3 = wv + (uint32_t)12U;
+  memcpy(wv, hash, 16U * sizeof (uint32_t));
+  uint32_t *wv3 = wv + 12U;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = wv3;
     uint32_t x = wv3[i] ^ mask[i];
     os[i] = x;);
   KRML_MAYBE_FOR10(i0,
-    (uint32_t)0U,
-    (uint32_t)10U,
-    (uint32_t)1U,
-    uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U;
+    0U,
+    10U,
+    1U,
+    uint32_t start_idx = i0 % 10U * 16U;
     uint32_t m_st[16U] = { 0U };
     uint32_t *r0 = m_st;
-    uint32_t *r1 = m_st + (uint32_t)4U;
-    uint32_t *r20 = m_st + (uint32_t)8U;
-    uint32_t *r30 = m_st + (uint32_t)12U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
+    uint32_t *r1 = m_st + 4U;
+    uint32_t *r20 = m_st + 8U;
+    uint32_t *r30 = m_st + 12U;
+    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 15U];
     uint32_t uu____0 = m_w[s2];
     uint32_t uu____1 = m_w[s4];
     uint32_t uu____2 = m_w[s6];
@@ -775,138 +763,138 @@ blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, u
     r30[2U] = uu____10;
     r30[3U] = uu____11;
     uint32_t *x = m_st;
-    uint32_t *y = m_st + (uint32_t)4U;
-    uint32_t *z = m_st + (uint32_t)8U;
-    uint32_t *w = m_st + (uint32_t)12U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    uint32_t *wv_a0 = wv + a * (uint32_t)4U;
-    uint32_t *wv_b0 = wv + b0 * (uint32_t)4U;
+    uint32_t *y = m_st + 4U;
+    uint32_t *z = m_st + 8U;
+    uint32_t *w = m_st + 12U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    uint32_t *wv_a0 = wv + a * 4U;
+    uint32_t *wv_b0 = wv + b0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a0;
       uint32_t x1 = wv_a0[i] + wv_b0[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a0;
       uint32_t x1 = wv_a0[i] + x[i];
       os[i] = x1;);
-    uint32_t *wv_a1 = wv + d10 * (uint32_t)4U;
-    uint32_t *wv_b1 = wv + a * (uint32_t)4U;
+    uint32_t *wv_a1 = wv + d10 * 4U;
+    uint32_t *wv_b1 = wv + a * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a1;
       uint32_t x1 = wv_a1[i] ^ wv_b1[i];
       os[i] = x1;);
     uint32_t *r10 = wv_a1;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r10;
       uint32_t x1 = r10[i];
-      uint32_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U;
+      uint32_t x10 = x1 >> 16U | x1 << 16U;
       os[i] = x10;);
-    uint32_t *wv_a2 = wv + c0 * (uint32_t)4U;
-    uint32_t *wv_b2 = wv + d10 * (uint32_t)4U;
+    uint32_t *wv_a2 = wv + c0 * 4U;
+    uint32_t *wv_b2 = wv + d10 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a2;
       uint32_t x1 = wv_a2[i] + wv_b2[i];
       os[i] = x1;);
-    uint32_t *wv_a3 = wv + b0 * (uint32_t)4U;
-    uint32_t *wv_b3 = wv + c0 * (uint32_t)4U;
+    uint32_t *wv_a3 = wv + b0 * 4U;
+    uint32_t *wv_b3 = wv + c0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a3;
       uint32_t x1 = wv_a3[i] ^ wv_b3[i];
       os[i] = x1;);
     uint32_t *r12 = wv_a3;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r12;
       uint32_t x1 = r12[i];
-      uint32_t x10 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U;
+      uint32_t x10 = x1 >> 12U | x1 << 20U;
       os[i] = x10;);
-    uint32_t *wv_a4 = wv + a * (uint32_t)4U;
-    uint32_t *wv_b4 = wv + b0 * (uint32_t)4U;
+    uint32_t *wv_a4 = wv + a * 4U;
+    uint32_t *wv_b4 = wv + b0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a4;
       uint32_t x1 = wv_a4[i] + wv_b4[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a4;
       uint32_t x1 = wv_a4[i] + y[i];
       os[i] = x1;);
-    uint32_t *wv_a5 = wv + d10 * (uint32_t)4U;
-    uint32_t *wv_b5 = wv + a * (uint32_t)4U;
+    uint32_t *wv_a5 = wv + d10 * 4U;
+    uint32_t *wv_b5 = wv + a * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a5;
       uint32_t x1 = wv_a5[i] ^ wv_b5[i];
       os[i] = x1;);
     uint32_t *r13 = wv_a5;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r13;
       uint32_t x1 = r13[i];
-      uint32_t x10 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U;
+      uint32_t x10 = x1 >> 8U | x1 << 24U;
       os[i] = x10;);
-    uint32_t *wv_a6 = wv + c0 * (uint32_t)4U;
-    uint32_t *wv_b6 = wv + d10 * (uint32_t)4U;
+    uint32_t *wv_a6 = wv + c0 * 4U;
+    uint32_t *wv_b6 = wv + d10 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a6;
       uint32_t x1 = wv_a6[i] + wv_b6[i];
       os[i] = x1;);
-    uint32_t *wv_a7 = wv + b0 * (uint32_t)4U;
-    uint32_t *wv_b7 = wv + c0 * (uint32_t)4U;
+    uint32_t *wv_a7 = wv + b0 * 4U;
+    uint32_t *wv_b7 = wv + c0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a7;
       uint32_t x1 = wv_a7[i] ^ wv_b7[i];
       os[i] = x1;);
     uint32_t *r14 = wv_a7;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r14;
       uint32_t x1 = r14[i];
-      uint32_t x10 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U;
+      uint32_t x10 = x1 >> 7U | x1 << 25U;
       os[i] = x10;);
-    uint32_t *r15 = wv + (uint32_t)4U;
-    uint32_t *r21 = wv + (uint32_t)8U;
-    uint32_t *r31 = wv + (uint32_t)12U;
+    uint32_t *r15 = wv + 4U;
+    uint32_t *r21 = wv + 8U;
+    uint32_t *r31 = wv + 12U;
     uint32_t *r110 = r15;
     uint32_t x00 = r110[1U];
     uint32_t x10 = r110[2U];
@@ -934,135 +922,135 @@ blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, u
     r112[1U] = x12;
     r112[2U] = x22;
     r112[3U] = x32;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    uint32_t *wv_a = wv + a0 * (uint32_t)4U;
-    uint32_t *wv_b8 = wv + b * (uint32_t)4U;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    uint32_t *wv_a = wv + a0 * 4U;
+    uint32_t *wv_b8 = wv + b * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a;
       uint32_t x1 = wv_a[i] + wv_b8[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a;
       uint32_t x1 = wv_a[i] + z[i];
       os[i] = x1;);
-    uint32_t *wv_a8 = wv + d1 * (uint32_t)4U;
-    uint32_t *wv_b9 = wv + a0 * (uint32_t)4U;
+    uint32_t *wv_a8 = wv + d1 * 4U;
+    uint32_t *wv_b9 = wv + a0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a8;
       uint32_t x1 = wv_a8[i] ^ wv_b9[i];
       os[i] = x1;);
     uint32_t *r16 = wv_a8;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r16;
       uint32_t x1 = r16[i];
-      uint32_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U;
+      uint32_t x13 = x1 >> 16U | x1 << 16U;
       os[i] = x13;);
-    uint32_t *wv_a9 = wv + c * (uint32_t)4U;
-    uint32_t *wv_b10 = wv + d1 * (uint32_t)4U;
+    uint32_t *wv_a9 = wv + c * 4U;
+    uint32_t *wv_b10 = wv + d1 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a9;
       uint32_t x1 = wv_a9[i] + wv_b10[i];
       os[i] = x1;);
-    uint32_t *wv_a10 = wv + b * (uint32_t)4U;
-    uint32_t *wv_b11 = wv + c * (uint32_t)4U;
+    uint32_t *wv_a10 = wv + b * 4U;
+    uint32_t *wv_b11 = wv + c * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a10;
       uint32_t x1 = wv_a10[i] ^ wv_b11[i];
       os[i] = x1;);
     uint32_t *r17 = wv_a10;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r17;
       uint32_t x1 = r17[i];
-      uint32_t x13 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U;
+      uint32_t x13 = x1 >> 12U | x1 << 20U;
       os[i] = x13;);
-    uint32_t *wv_a11 = wv + a0 * (uint32_t)4U;
-    uint32_t *wv_b12 = wv + b * (uint32_t)4U;
+    uint32_t *wv_a11 = wv + a0 * 4U;
+    uint32_t *wv_b12 = wv + b * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a11;
       uint32_t x1 = wv_a11[i] + wv_b12[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a11;
       uint32_t x1 = wv_a11[i] + w[i];
       os[i] = x1;);
-    uint32_t *wv_a12 = wv + d1 * (uint32_t)4U;
-    uint32_t *wv_b13 = wv + a0 * (uint32_t)4U;
+    uint32_t *wv_a12 = wv + d1 * 4U;
+    uint32_t *wv_b13 = wv + a0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a12;
       uint32_t x1 = wv_a12[i] ^ wv_b13[i];
       os[i] = x1;);
     uint32_t *r18 = wv_a12;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r18;
       uint32_t x1 = r18[i];
-      uint32_t x13 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U;
+      uint32_t x13 = x1 >> 8U | x1 << 24U;
       os[i] = x13;);
-    uint32_t *wv_a13 = wv + c * (uint32_t)4U;
-    uint32_t *wv_b14 = wv + d1 * (uint32_t)4U;
+    uint32_t *wv_a13 = wv + c * 4U;
+    uint32_t *wv_b14 = wv + d1 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a13;
       uint32_t x1 = wv_a13[i] + wv_b14[i];
       os[i] = x1;);
-    uint32_t *wv_a14 = wv + b * (uint32_t)4U;
-    uint32_t *wv_b = wv + c * (uint32_t)4U;
+    uint32_t *wv_a14 = wv + b * 4U;
+    uint32_t *wv_b = wv + c * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a14;
       uint32_t x1 = wv_a14[i] ^ wv_b[i];
       os[i] = x1;);
     uint32_t *r19 = wv_a14;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r19;
       uint32_t x1 = r19[i];
-      uint32_t x13 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U;
+      uint32_t x13 = x1 >> 7U | x1 << 25U;
       os[i] = x13;);
-    uint32_t *r113 = wv + (uint32_t)4U;
-    uint32_t *r2 = wv + (uint32_t)8U;
-    uint32_t *r3 = wv + (uint32_t)12U;
+    uint32_t *r113 = wv + 4U;
+    uint32_t *r2 = wv + 8U;
+    uint32_t *r3 = wv + 12U;
     uint32_t *r11 = r113;
     uint32_t x03 = r11[3U];
     uint32_t x13 = r11[0U];
@@ -1091,36 +1079,36 @@ blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, u
     r115[2U] = x2;
     r115[3U] = x3;);
   uint32_t *s0 = hash;
-  uint32_t *s1 = hash + (uint32_t)4U;
+  uint32_t *s1 = hash + 4U;
   uint32_t *r0 = wv;
-  uint32_t *r1 = wv + (uint32_t)4U;
-  uint32_t *r2 = wv + (uint32_t)8U;
-  uint32_t *r3 = wv + (uint32_t)12U;
+  uint32_t *r1 = wv + 4U;
+  uint32_t *r2 = wv + 8U;
+  uint32_t *r3 = wv + 12U;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = s0;
     uint32_t x = s0[i] ^ r0[i];
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = s0;
     uint32_t x = s0[i] ^ r2[i];
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = s1;
     uint32_t x = s1[i] ^ r1[i];
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = s1;
     uint32_t x = s1[i] ^ r3[i];
     os[i] = x;);
@@ -1129,9 +1117,9 @@ blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, u
 void Hacl_Blake2s_32_blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn)
 {
   uint32_t *r0 = hash;
-  uint32_t *r1 = hash + (uint32_t)4U;
-  uint32_t *r2 = hash + (uint32_t)8U;
-  uint32_t *r3 = hash + (uint32_t)12U;
+  uint32_t *r1 = hash + 4U;
+  uint32_t *r2 = hash + 8U;
+  uint32_t *r3 = hash + 12U;
   uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U];
   uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U];
   uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U];
@@ -1148,8 +1136,8 @@ void Hacl_Blake2s_32_blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn)
   r3[1U] = iv5;
   r3[2U] = iv6;
   r3[3U] = iv7;
-  uint32_t kk_shift_8 = kk << (uint32_t)8U;
-  uint32_t iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ nn));
+  uint32_t kk_shift_8 = kk << 8U;
+  uint32_t iv0_ = iv0 ^ (0x01010000U ^ (kk_shift_8 ^ nn));
   r0[0U] = iv0_;
   r0[1U] = iv1;
   r0[2U] = iv2;
@@ -1169,10 +1157,10 @@ Hacl_Blake2s_32_blake2s_update_key(
   uint32_t ll
 )
 {
-  uint64_t lb = (uint64_t)(uint32_t)64U;
+  uint64_t lb = (uint64_t)64U;
   uint8_t b[64U] = { 0U };
   memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
+  if (ll == 0U)
   {
     blake2s_update_block(wv, hash, true, lb, b);
   }
@@ -1180,7 +1168,7 @@ Hacl_Blake2s_32_blake2s_update_key(
   {
     blake2s_update_block(wv, hash, false, lb, b);
   }
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
 }
 
 void
@@ -1193,11 +1181,11 @@ Hacl_Blake2s_32_blake2s_update_multi(
   uint32_t nb
 )
 {
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U);
-    uint8_t *b = blocks + i * (uint32_t)64U;
+    uint64_t totlen = prev + (uint64_t)((i + 1U) * 64U);
+    uint8_t *b = blocks + i * 64U;
     blake2s_update_block(wv, hash, false, totlen, b);
   }
 }
@@ -1217,7 +1205,7 @@ Hacl_Blake2s_32_blake2s_update_last(
   memcpy(b, last, rem * sizeof (uint8_t));
   uint64_t totlen = prev + (uint64_t)len;
   blake2s_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
 }
 
 static void
@@ -1229,13 +1217,13 @@ blake2s_update_blocks(
   uint8_t *blocks
 )
 {
-  uint32_t nb0 = len / (uint32_t)64U;
-  uint32_t rem0 = len % (uint32_t)64U;
+  uint32_t nb0 = len / 64U;
+  uint32_t rem0 = len % 64U;
   K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
+  if (rem0 == 0U && nb0 > 0U)
   {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)64U;
+    uint32_t nb_ = nb0 - 1U;
+    uint32_t rem_ = 64U;
     scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
   }
   else
@@ -1251,40 +1239,32 @@ blake2s_update_blocks(
 static inline void
 blake2s_update(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d)
 {
-  uint64_t lb = (uint64_t)(uint32_t)64U;
-  if (kk > (uint32_t)0U)
+  uint64_t lb = (uint64_t)64U;
+  if (kk > 0U)
   {
     Hacl_Blake2s_32_blake2s_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
+    if (!(ll == 0U))
     {
       blake2s_update_blocks(ll, wv, hash, lb, d);
       return;
     }
     return;
   }
-  blake2s_update_blocks(ll, wv, hash, (uint64_t)(uint32_t)0U, d);
+  blake2s_update_blocks(ll, wv, hash, (uint64_t)0U, d);
 }
 
 void Hacl_Blake2s_32_blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash)
 {
   uint8_t b[32U] = { 0U };
   uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)16U;
+  uint8_t *second = b + 16U;
   uint32_t *row0 = hash;
-  uint32_t *row1 = hash + (uint32_t)4U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store32_le(first + i * (uint32_t)4U, row0[i]););
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store32_le(second + i * (uint32_t)4U, row1[i]););
+  uint32_t *row1 = hash + 4U;
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(first + i * 4U, row0[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(second + i * 4U, row1[i]););
   uint8_t *final = b;
   memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)32U, uint8_t);
+  Lib_Memzero0_memzero(b, 32U, uint8_t);
 }
 
 /**
@@ -1312,13 +1292,13 @@ Hacl_Blake2s_32_blake2s(
   Hacl_Blake2s_32_blake2s_init(b, kk, nn);
   blake2s_update(b1, b, kk, k, ll, d);
   Hacl_Blake2s_32_blake2s_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)16U, uint32_t);
-  Lib_Memzero0_memzero(b, (uint32_t)16U, uint32_t);
+  Lib_Memzero0_memzero(b1, 16U, uint32_t);
+  Lib_Memzero0_memzero(b, 16U, uint32_t);
 }
 
 uint32_t *Hacl_Blake2s_32_blake2s_malloc(void)
 {
-  uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
+  uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
   return buf;
 }
 
diff --git a/src/Hacl_Hash_Blake2b_256.c b/src/Hacl_Hash_Blake2b_256.c
index b37ffc5f..a265226b 100644
--- a/src/Hacl_Hash_Blake2b_256.c
+++ b/src/Hacl_Hash_Blake2b_256.c
@@ -40,11 +40,11 @@ blake2b_update_block(
 {
   uint64_t m_w[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint64_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)8U;
+    uint8_t *bj = d + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -53,159 +53,159 @@ blake2b_update_block(
   uint64_t wv_14;
   if (flag)
   {
-    wv_14 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    wv_14 = 0xFFFFFFFFFFFFFFFFULL;
   }
   else
   {
-    wv_14 = (uint64_t)0U;
+    wv_14 = 0ULL;
   }
-  uint64_t wv_15 = (uint64_t)0U;
+  uint64_t wv_15 = 0ULL;
   mask =
     Lib_IntVector_Intrinsics_vec256_load64s(FStar_UInt128_uint128_to_uint64(totlen),
-      FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, (uint32_t)64U)),
+      FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, 64U)),
       wv_14,
       wv_15);
-  memcpy(wv, hash, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Lib_IntVector_Intrinsics_vec256 *wv3 = wv + (uint32_t)3U;
+  memcpy(wv, hash, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Lib_IntVector_Intrinsics_vec256 *wv3 = wv + 3U;
   wv3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv3[0U], mask);
   KRML_MAYBE_FOR12(i,
-    (uint32_t)0U,
-    (uint32_t)12U,
-    (uint32_t)1U,
-    uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U;
+    0U,
+    12U,
+    1U,
+    uint32_t start_idx = i % 10U * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 m_st[4U] KRML_POST_ALIGN(32) = { 0U };
     Lib_IntVector_Intrinsics_vec256 *r0 = m_st;
-    Lib_IntVector_Intrinsics_vec256 *r1 = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *r20 = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *r30 = m_st + (uint32_t)3U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
+    Lib_IntVector_Intrinsics_vec256 *r1 = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec256 *r20 = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec256 *r30 = m_st + 3U;
+    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 15U];
     r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]);
     r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]);
     r20[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]);
     r30[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]);
     Lib_IntVector_Intrinsics_vec256 *x = m_st;
-    Lib_IntVector_Intrinsics_vec256 *y = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *z = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *w = m_st + (uint32_t)3U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + b0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *y = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec256 *z = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec256 *w = m_st + 3U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + b0 * 1U;
     wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], wv_b0[0U]);
     wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], x[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv + a * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv + a * 1U;
     wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a1[0U], wv_b1[0U]);
-    wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a1[0U], (uint32_t)32U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + d10 * (uint32_t)1U;
+    wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a1[0U], 32U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + d10 * 1U;
     wv_a2[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a2[0U], wv_b2[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + c0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + c0 * 1U;
     wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a3[0U], wv_b3[0U]);
-    wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a3[0U], (uint32_t)24U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + b0 * (uint32_t)1U;
+    wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a3[0U], 24U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + b0 * 1U;
     wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], wv_b4[0U]);
     wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], y[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv + a * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv + a * 1U;
     wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a5[0U], wv_b5[0U]);
-    wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a5[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + d10 * (uint32_t)1U;
+    wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a5[0U], 16U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + d10 * 1U;
     wv_a6[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a6[0U], wv_b6[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + c0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + c0 * 1U;
     wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a7[0U], wv_b7[0U]);
-    wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a7[0U], (uint32_t)63U);
-    Lib_IntVector_Intrinsics_vec256 *r10 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *r21 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *r31 = wv + (uint32_t)3U;
+    wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a7[0U], 63U);
+    Lib_IntVector_Intrinsics_vec256 *r10 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec256 *r21 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec256 *r31 = wv + 3U;
     Lib_IntVector_Intrinsics_vec256 v00 = r10[0U];
     Lib_IntVector_Intrinsics_vec256
-    v1 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v00, (uint32_t)1U);
+    v1 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v00, 1U);
     r10[0U] = v1;
     Lib_IntVector_Intrinsics_vec256 v01 = r21[0U];
     Lib_IntVector_Intrinsics_vec256
-    v10 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v01, (uint32_t)2U);
+    v10 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v01, 2U);
     r21[0U] = v10;
     Lib_IntVector_Intrinsics_vec256 v02 = r31[0U];
     Lib_IntVector_Intrinsics_vec256
-    v11 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v02, (uint32_t)3U);
+    v11 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v02, 3U);
     r31[0U] = v11;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec256 *wv_a = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + b * (uint32_t)1U;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + b * 1U;
     wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], wv_b8[0U]);
     wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], z[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv + a0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv + a0 * 1U;
     wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a8[0U], wv_b9[0U]);
-    wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a8[0U], (uint32_t)32U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + d1 * (uint32_t)1U;
+    wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a8[0U], 32U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + d1 * 1U;
     wv_a9[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a9[0U], wv_b10[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + c * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + c * 1U;
     wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a10[0U], wv_b11[0U]);
-    wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a10[0U], (uint32_t)24U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + b * (uint32_t)1U;
+    wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a10[0U], 24U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + b * 1U;
     wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], wv_b12[0U]);
     wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], w[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv + a0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv + a0 * 1U;
     wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a12[0U], wv_b13[0U]);
-    wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a12[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + d1 * (uint32_t)1U;
+    wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a12[0U], 16U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + d1 * 1U;
     wv_a13[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a13[0U], wv_b14[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b = wv + c * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b = wv + c * 1U;
     wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a14[0U], wv_b[0U]);
-    wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a14[0U], (uint32_t)63U);
-    Lib_IntVector_Intrinsics_vec256 *r11 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *r2 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *r3 = wv + (uint32_t)3U;
+    wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a14[0U], 63U);
+    Lib_IntVector_Intrinsics_vec256 *r11 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec256 *r2 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec256 *r3 = wv + 3U;
     Lib_IntVector_Intrinsics_vec256 v0 = r11[0U];
     Lib_IntVector_Intrinsics_vec256
-    v12 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v0, (uint32_t)3U);
+    v12 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v0, 3U);
     r11[0U] = v12;
     Lib_IntVector_Intrinsics_vec256 v03 = r2[0U];
     Lib_IntVector_Intrinsics_vec256
-    v13 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v03, (uint32_t)2U);
+    v13 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v03, 2U);
     r2[0U] = v13;
     Lib_IntVector_Intrinsics_vec256 v04 = r3[0U];
     Lib_IntVector_Intrinsics_vec256
-    v14 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v04, (uint32_t)1U);
+    v14 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v04, 1U);
     r3[0U] = v14;);
   Lib_IntVector_Intrinsics_vec256 *s0 = hash;
-  Lib_IntVector_Intrinsics_vec256 *s1 = hash + (uint32_t)1U;
+  Lib_IntVector_Intrinsics_vec256 *s1 = hash + 1U;
   Lib_IntVector_Intrinsics_vec256 *r0 = wv;
-  Lib_IntVector_Intrinsics_vec256 *r1 = wv + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = wv + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = wv + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec256 *r1 = wv + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = wv + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = wv + 3U;
   s0[0U] = Lib_IntVector_Intrinsics_vec256_xor(s0[0U], r0[0U]);
   s0[0U] = Lib_IntVector_Intrinsics_vec256_xor(s0[0U], r2[0U]);
   s1[0U] = Lib_IntVector_Intrinsics_vec256_xor(s1[0U], r1[0U]);
@@ -216,9 +216,9 @@ void
 Hacl_Blake2b_256_blake2b_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn)
 {
   Lib_IntVector_Intrinsics_vec256 *r0 = hash;
-  Lib_IntVector_Intrinsics_vec256 *r1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = hash + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = hash + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec256 *r1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = hash + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = hash + 3U;
   uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U];
   uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U];
   uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U];
@@ -229,8 +229,8 @@ Hacl_Blake2b_256_blake2b_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk
   uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U];
   r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3);
   r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
-  uint64_t kk_shift_8 = (uint64_t)kk << (uint32_t)8U;
-  uint64_t iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)nn));
+  uint64_t kk_shift_8 = (uint64_t)kk << 8U;
+  uint64_t iv0_ = iv0 ^ (0x01010000ULL ^ (kk_shift_8 ^ (uint64_t)nn));
   r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1, iv2, iv3);
   r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
 }
@@ -244,10 +244,10 @@ Hacl_Blake2b_256_blake2b_update_key(
   uint32_t ll
 )
 {
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
   uint8_t b[128U] = { 0U };
   memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
+  if (ll == 0U)
   {
     blake2b_update_block(wv, hash, true, lb, b);
   }
@@ -255,7 +255,7 @@ Hacl_Blake2b_256_blake2b_update_key(
   {
     blake2b_update_block(wv, hash, false, lb, b);
   }
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
 }
 
 void
@@ -268,14 +268,14 @@ Hacl_Blake2b_256_blake2b_update_multi(
   uint32_t nb
 )
 {
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
   {
     FStar_UInt128_uint128
     totlen =
       FStar_UInt128_add_mod(prev,
-        FStar_UInt128_uint64_to_uint128((uint64_t)((i + (uint32_t)1U) * (uint32_t)128U)));
-    uint8_t *b = blocks + i * (uint32_t)128U;
+        FStar_UInt128_uint64_to_uint128((uint64_t)((i + 1U) * 128U)));
+    uint8_t *b = blocks + i * 128U;
     blake2b_update_block(wv, hash, false, totlen, b);
   }
 }
@@ -296,7 +296,7 @@ Hacl_Blake2b_256_blake2b_update_last(
   FStar_UInt128_uint128
   totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
   blake2b_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
 }
 
 static inline void
@@ -308,13 +308,13 @@ blake2b_update_blocks(
   uint8_t *blocks
 )
 {
-  uint32_t nb0 = len / (uint32_t)128U;
-  uint32_t rem0 = len % (uint32_t)128U;
+  uint32_t nb0 = len / 128U;
+  uint32_t rem0 = len % 128U;
   K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
+  if (rem0 == 0U && nb0 > 0U)
   {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)128U;
+    uint32_t nb_ = nb0 - 1U;
+    uint32_t rem_ = 128U;
     scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
   }
   else
@@ -337,22 +337,18 @@ blake2b_update(
   uint8_t *d
 )
 {
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
-  if (kk > (uint32_t)0U)
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
+  if (kk > 0U)
   {
     Hacl_Blake2b_256_blake2b_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
+    if (!(ll == 0U))
     {
       blake2b_update_blocks(ll, wv, hash, lb, d);
       return;
     }
     return;
   }
-  blake2b_update_blocks(ll,
-    wv,
-    hash,
-    FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)0U),
-    d);
+  blake2b_update_blocks(ll, wv, hash, FStar_UInt128_uint64_to_uint128((uint64_t)0U), d);
 }
 
 void
@@ -364,14 +360,14 @@ Hacl_Blake2b_256_blake2b_finish(
 {
   uint8_t b[64U] = { 0U };
   uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)32U;
+  uint8_t *second = b + 32U;
   Lib_IntVector_Intrinsics_vec256 *row0 = hash;
-  Lib_IntVector_Intrinsics_vec256 *row1 = hash + (uint32_t)1U;
+  Lib_IntVector_Intrinsics_vec256 *row1 = hash + 1U;
   Lib_IntVector_Intrinsics_vec256_store64_le(first, row0[0U]);
   Lib_IntVector_Intrinsics_vec256_store64_le(second, row1[0U]);
   uint8_t *final = b;
   memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
 }
 
 /**
@@ -399,8 +395,8 @@ Hacl_Blake2b_256_blake2b(
   Hacl_Blake2b_256_blake2b_init(b, kk, nn);
   blake2b_update(b1, b, kk, k, ll, d);
   Hacl_Blake2b_256_blake2b_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)4U, Lib_IntVector_Intrinsics_vec256);
-  Lib_Memzero0_memzero(b, (uint32_t)4U, Lib_IntVector_Intrinsics_vec256);
+  Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec256);
+  Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec256);
 }
 
 void
@@ -410,13 +406,13 @@ Hacl_Blake2b_256_load_state256b_from_state32(
 )
 {
   Lib_IntVector_Intrinsics_vec256 *r0 = st;
-  Lib_IntVector_Intrinsics_vec256 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = st + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec256 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = st + 3U;
   uint64_t *b0 = st32;
-  uint64_t *b1 = st32 + (uint32_t)4U;
-  uint64_t *b2 = st32 + (uint32_t)8U;
-  uint64_t *b3 = st32 + (uint32_t)12U;
+  uint64_t *b1 = st32 + 4U;
+  uint64_t *b2 = st32 + 8U;
+  uint64_t *b3 = st32 + 12U;
   r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b0[0U], b0[1U], b0[2U], b0[3U]);
   r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b1[0U], b1[1U], b1[2U], b1[3U]);
   r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b2[0U], b2[1U], b2[2U], b2[3U]);
@@ -430,21 +426,21 @@ Hacl_Blake2b_256_store_state256b_to_state32(
 )
 {
   Lib_IntVector_Intrinsics_vec256 *r0 = st;
-  Lib_IntVector_Intrinsics_vec256 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = st + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec256 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = st + 3U;
   uint64_t *b0 = st32;
-  uint64_t *b1 = st32 + (uint32_t)4U;
-  uint64_t *b2 = st32 + (uint32_t)8U;
-  uint64_t *b3 = st32 + (uint32_t)12U;
+  uint64_t *b1 = st32 + 4U;
+  uint64_t *b2 = st32 + 8U;
+  uint64_t *b3 = st32 + 12U;
   uint8_t b8[32U] = { 0U };
   Lib_IntVector_Intrinsics_vec256_store64_le(b8, r0[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = b0;
-    uint8_t *bj = b8 + i * (uint32_t)8U;
+    uint8_t *bj = b8 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -452,11 +448,11 @@ Hacl_Blake2b_256_store_state256b_to_state32(
   uint8_t b80[32U] = { 0U };
   Lib_IntVector_Intrinsics_vec256_store64_le(b80, r1[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = b1;
-    uint8_t *bj = b80 + i * (uint32_t)8U;
+    uint8_t *bj = b80 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -464,11 +460,11 @@ Hacl_Blake2b_256_store_state256b_to_state32(
   uint8_t b81[32U] = { 0U };
   Lib_IntVector_Intrinsics_vec256_store64_le(b81, r2[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = b2;
-    uint8_t *bj = b81 + i * (uint32_t)8U;
+    uint8_t *bj = b81 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -476,11 +472,11 @@ Hacl_Blake2b_256_store_state256b_to_state32(
   uint8_t b82[32U] = { 0U };
   Lib_IntVector_Intrinsics_vec256_store64_le(b82, r3[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = b3;
-    uint8_t *bj = b82 + i * (uint32_t)8U;
+    uint8_t *bj = b82 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -492,8 +488,8 @@ Lib_IntVector_Intrinsics_vec256 *Hacl_Blake2b_256_blake2b_malloc(void)
   Lib_IntVector_Intrinsics_vec256
   *buf =
     (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U);
-  memset(buf, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
+  memset(buf, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
   return buf;
 }
 
diff --git a/src/Hacl_Hash_Blake2s_128.c b/src/Hacl_Hash_Blake2s_128.c
index 86c4f030..0f3dea1f 100644
--- a/src/Hacl_Hash_Blake2s_128.c
+++ b/src/Hacl_Hash_Blake2s_128.c
@@ -40,11 +40,11 @@ blake2s_update_block(
 {
   uint32_t m_w[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)4U;
+    uint8_t *bj = d + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -53,159 +53,159 @@ blake2s_update_block(
   uint32_t wv_14;
   if (flag)
   {
-    wv_14 = (uint32_t)0xFFFFFFFFU;
+    wv_14 = 0xFFFFFFFFU;
   }
   else
   {
-    wv_14 = (uint32_t)0U;
+    wv_14 = 0U;
   }
-  uint32_t wv_15 = (uint32_t)0U;
+  uint32_t wv_15 = 0U;
   mask =
     Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)totlen,
-      (uint32_t)(totlen >> (uint32_t)32U),
+      (uint32_t)(totlen >> 32U),
       wv_14,
       wv_15);
-  memcpy(wv, hash, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Lib_IntVector_Intrinsics_vec128 *wv3 = wv + (uint32_t)3U;
+  memcpy(wv, hash, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Lib_IntVector_Intrinsics_vec128 *wv3 = wv + 3U;
   wv3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv3[0U], mask);
   KRML_MAYBE_FOR10(i,
-    (uint32_t)0U,
-    (uint32_t)10U,
-    (uint32_t)1U,
-    uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U;
+    0U,
+    10U,
+    1U,
+    uint32_t start_idx = i % 10U * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 m_st[4U] KRML_POST_ALIGN(16) = { 0U };
     Lib_IntVector_Intrinsics_vec128 *r0 = m_st;
-    Lib_IntVector_Intrinsics_vec128 *r1 = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *r20 = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *r30 = m_st + (uint32_t)3U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
+    Lib_IntVector_Intrinsics_vec128 *r1 = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec128 *r20 = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec128 *r30 = m_st + 3U;
+    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 15U];
     r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]);
     r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]);
     r20[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]);
     r30[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]);
     Lib_IntVector_Intrinsics_vec128 *x = m_st;
-    Lib_IntVector_Intrinsics_vec128 *y = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *z = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *w = m_st + (uint32_t)3U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + b0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *y = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec128 *z = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec128 *w = m_st + 3U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + b0 * 1U;
     wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], wv_b0[0U]);
     wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], x[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv + a * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv + a * 1U;
     wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a1[0U], wv_b1[0U]);
-    wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a1[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + d10 * (uint32_t)1U;
+    wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a1[0U], 16U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + d10 * 1U;
     wv_a2[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a2[0U], wv_b2[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + c0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + c0 * 1U;
     wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a3[0U], wv_b3[0U]);
-    wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a3[0U], (uint32_t)12U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + b0 * (uint32_t)1U;
+    wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a3[0U], 12U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + b0 * 1U;
     wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], wv_b4[0U]);
     wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], y[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv + a * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv + a * 1U;
     wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a5[0U], wv_b5[0U]);
-    wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a5[0U], (uint32_t)8U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + d10 * (uint32_t)1U;
+    wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a5[0U], 8U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + d10 * 1U;
     wv_a6[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a6[0U], wv_b6[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + c0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + c0 * 1U;
     wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a7[0U], wv_b7[0U]);
-    wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a7[0U], (uint32_t)7U);
-    Lib_IntVector_Intrinsics_vec128 *r10 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *r21 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *r31 = wv + (uint32_t)3U;
+    wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a7[0U], 7U);
+    Lib_IntVector_Intrinsics_vec128 *r10 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec128 *r21 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec128 *r31 = wv + 3U;
     Lib_IntVector_Intrinsics_vec128 v00 = r10[0U];
     Lib_IntVector_Intrinsics_vec128
-    v1 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v00, (uint32_t)1U);
+    v1 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v00, 1U);
     r10[0U] = v1;
     Lib_IntVector_Intrinsics_vec128 v01 = r21[0U];
     Lib_IntVector_Intrinsics_vec128
-    v10 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v01, (uint32_t)2U);
+    v10 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v01, 2U);
     r21[0U] = v10;
     Lib_IntVector_Intrinsics_vec128 v02 = r31[0U];
     Lib_IntVector_Intrinsics_vec128
-    v11 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v02, (uint32_t)3U);
+    v11 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v02, 3U);
     r31[0U] = v11;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec128 *wv_a = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + b * (uint32_t)1U;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + b * 1U;
     wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], wv_b8[0U]);
     wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], z[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv + a0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv + a0 * 1U;
     wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a8[0U], wv_b9[0U]);
-    wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a8[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + d1 * (uint32_t)1U;
+    wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a8[0U], 16U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + d1 * 1U;
     wv_a9[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a9[0U], wv_b10[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + c * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + c * 1U;
     wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a10[0U], wv_b11[0U]);
-    wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a10[0U], (uint32_t)12U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + b * (uint32_t)1U;
+    wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a10[0U], 12U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + b * 1U;
     wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], wv_b12[0U]);
     wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], w[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv + a0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv + a0 * 1U;
     wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a12[0U], wv_b13[0U]);
-    wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a12[0U], (uint32_t)8U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + d1 * (uint32_t)1U;
+    wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a12[0U], 8U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + d1 * 1U;
     wv_a13[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a13[0U], wv_b14[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b = wv + c * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b = wv + c * 1U;
     wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a14[0U], wv_b[0U]);
-    wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a14[0U], (uint32_t)7U);
-    Lib_IntVector_Intrinsics_vec128 *r11 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *r2 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *r3 = wv + (uint32_t)3U;
+    wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a14[0U], 7U);
+    Lib_IntVector_Intrinsics_vec128 *r11 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec128 *r2 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec128 *r3 = wv + 3U;
     Lib_IntVector_Intrinsics_vec128 v0 = r11[0U];
     Lib_IntVector_Intrinsics_vec128
-    v12 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v0, (uint32_t)3U);
+    v12 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v0, 3U);
     r11[0U] = v12;
     Lib_IntVector_Intrinsics_vec128 v03 = r2[0U];
     Lib_IntVector_Intrinsics_vec128
-    v13 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v03, (uint32_t)2U);
+    v13 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v03, 2U);
     r2[0U] = v13;
     Lib_IntVector_Intrinsics_vec128 v04 = r3[0U];
     Lib_IntVector_Intrinsics_vec128
-    v14 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v04, (uint32_t)1U);
+    v14 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v04, 1U);
     r3[0U] = v14;);
   Lib_IntVector_Intrinsics_vec128 *s0 = hash;
-  Lib_IntVector_Intrinsics_vec128 *s1 = hash + (uint32_t)1U;
+  Lib_IntVector_Intrinsics_vec128 *s1 = hash + 1U;
   Lib_IntVector_Intrinsics_vec128 *r0 = wv;
-  Lib_IntVector_Intrinsics_vec128 *r1 = wv + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = wv + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = wv + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec128 *r1 = wv + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = wv + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = wv + 3U;
   s0[0U] = Lib_IntVector_Intrinsics_vec128_xor(s0[0U], r0[0U]);
   s0[0U] = Lib_IntVector_Intrinsics_vec128_xor(s0[0U], r2[0U]);
   s1[0U] = Lib_IntVector_Intrinsics_vec128_xor(s1[0U], r1[0U]);
@@ -216,9 +216,9 @@ void
 Hacl_Blake2s_128_blake2s_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn)
 {
   Lib_IntVector_Intrinsics_vec128 *r0 = hash;
-  Lib_IntVector_Intrinsics_vec128 *r1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = hash + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = hash + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec128 *r1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = hash + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = hash + 3U;
   uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U];
   uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U];
   uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U];
@@ -229,8 +229,8 @@ Hacl_Blake2s_128_blake2s_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk
   uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U];
   r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3);
   r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
-  uint32_t kk_shift_8 = kk << (uint32_t)8U;
-  uint32_t iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ nn));
+  uint32_t kk_shift_8 = kk << 8U;
+  uint32_t iv0_ = iv0 ^ (0x01010000U ^ (kk_shift_8 ^ nn));
   r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1, iv2, iv3);
   r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
 }
@@ -244,10 +244,10 @@ Hacl_Blake2s_128_blake2s_update_key(
   uint32_t ll
 )
 {
-  uint64_t lb = (uint64_t)(uint32_t)64U;
+  uint64_t lb = (uint64_t)64U;
   uint8_t b[64U] = { 0U };
   memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
+  if (ll == 0U)
   {
     blake2s_update_block(wv, hash, true, lb, b);
   }
@@ -255,7 +255,7 @@ Hacl_Blake2s_128_blake2s_update_key(
   {
     blake2s_update_block(wv, hash, false, lb, b);
   }
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
 }
 
 void
@@ -268,11 +268,11 @@ Hacl_Blake2s_128_blake2s_update_multi(
   uint32_t nb
 )
 {
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U);
-    uint8_t *b = blocks + i * (uint32_t)64U;
+    uint64_t totlen = prev + (uint64_t)((i + 1U) * 64U);
+    uint8_t *b = blocks + i * 64U;
     blake2s_update_block(wv, hash, false, totlen, b);
   }
 }
@@ -292,7 +292,7 @@ Hacl_Blake2s_128_blake2s_update_last(
   memcpy(b, last, rem * sizeof (uint8_t));
   uint64_t totlen = prev + (uint64_t)len;
   blake2s_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
 }
 
 static inline void
@@ -304,13 +304,13 @@ blake2s_update_blocks(
   uint8_t *blocks
 )
 {
-  uint32_t nb0 = len / (uint32_t)64U;
-  uint32_t rem0 = len % (uint32_t)64U;
+  uint32_t nb0 = len / 64U;
+  uint32_t rem0 = len % 64U;
   K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
+  if (rem0 == 0U && nb0 > 0U)
   {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)64U;
+    uint32_t nb_ = nb0 - 1U;
+    uint32_t rem_ = 64U;
     scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
   }
   else
@@ -333,18 +333,18 @@ blake2s_update(
   uint8_t *d
 )
 {
-  uint64_t lb = (uint64_t)(uint32_t)64U;
-  if (kk > (uint32_t)0U)
+  uint64_t lb = (uint64_t)64U;
+  if (kk > 0U)
   {
     Hacl_Blake2s_128_blake2s_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
+    if (!(ll == 0U))
     {
       blake2s_update_blocks(ll, wv, hash, lb, d);
       return;
     }
     return;
   }
-  blake2s_update_blocks(ll, wv, hash, (uint64_t)(uint32_t)0U, d);
+  blake2s_update_blocks(ll, wv, hash, (uint64_t)0U, d);
 }
 
 void
@@ -356,14 +356,14 @@ Hacl_Blake2s_128_blake2s_finish(
 {
   uint8_t b[32U] = { 0U };
   uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)16U;
+  uint8_t *second = b + 16U;
   Lib_IntVector_Intrinsics_vec128 *row0 = hash;
-  Lib_IntVector_Intrinsics_vec128 *row1 = hash + (uint32_t)1U;
+  Lib_IntVector_Intrinsics_vec128 *row1 = hash + 1U;
   Lib_IntVector_Intrinsics_vec128_store32_le(first, row0[0U]);
   Lib_IntVector_Intrinsics_vec128_store32_le(second, row1[0U]);
   uint8_t *final = b;
   memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)32U, uint8_t);
+  Lib_Memzero0_memzero(b, 32U, uint8_t);
 }
 
 /**
@@ -391,8 +391,8 @@ Hacl_Blake2s_128_blake2s(
   Hacl_Blake2s_128_blake2s_init(b, kk, nn);
   blake2s_update(b1, b, kk, k, ll, d);
   Hacl_Blake2s_128_blake2s_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)4U, Lib_IntVector_Intrinsics_vec128);
-  Lib_Memzero0_memzero(b, (uint32_t)4U, Lib_IntVector_Intrinsics_vec128);
+  Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec128);
+  Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128);
 }
 
 void
@@ -402,21 +402,21 @@ Hacl_Blake2s_128_store_state128s_to_state32(
 )
 {
   Lib_IntVector_Intrinsics_vec128 *r0 = st;
-  Lib_IntVector_Intrinsics_vec128 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = st + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec128 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = st + 3U;
   uint32_t *b0 = st32;
-  uint32_t *b1 = st32 + (uint32_t)4U;
-  uint32_t *b2 = st32 + (uint32_t)8U;
-  uint32_t *b3 = st32 + (uint32_t)12U;
+  uint32_t *b1 = st32 + 4U;
+  uint32_t *b2 = st32 + 8U;
+  uint32_t *b3 = st32 + 12U;
   uint8_t b8[16U] = { 0U };
   Lib_IntVector_Intrinsics_vec128_store32_le(b8, r0[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = b0;
-    uint8_t *bj = b8 + i * (uint32_t)4U;
+    uint8_t *bj = b8 + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -424,11 +424,11 @@ Hacl_Blake2s_128_store_state128s_to_state32(
   uint8_t b80[16U] = { 0U };
   Lib_IntVector_Intrinsics_vec128_store32_le(b80, r1[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = b1;
-    uint8_t *bj = b80 + i * (uint32_t)4U;
+    uint8_t *bj = b80 + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -436,11 +436,11 @@ Hacl_Blake2s_128_store_state128s_to_state32(
   uint8_t b81[16U] = { 0U };
   Lib_IntVector_Intrinsics_vec128_store32_le(b81, r2[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = b2;
-    uint8_t *bj = b81 + i * (uint32_t)4U;
+    uint8_t *bj = b81 + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -448,11 +448,11 @@ Hacl_Blake2s_128_store_state128s_to_state32(
   uint8_t b82[16U] = { 0U };
   Lib_IntVector_Intrinsics_vec128_store32_le(b82, r3[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = b3;
-    uint8_t *bj = b82 + i * (uint32_t)4U;
+    uint8_t *bj = b82 + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -466,13 +466,13 @@ Hacl_Blake2s_128_load_state128s_from_state32(
 )
 {
   Lib_IntVector_Intrinsics_vec128 *r0 = st;
-  Lib_IntVector_Intrinsics_vec128 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = st + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec128 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = st + 3U;
   uint32_t *b0 = st32;
-  uint32_t *b1 = st32 + (uint32_t)4U;
-  uint32_t *b2 = st32 + (uint32_t)8U;
-  uint32_t *b3 = st32 + (uint32_t)12U;
+  uint32_t *b1 = st32 + 4U;
+  uint32_t *b2 = st32 + 8U;
+  uint32_t *b3 = st32 + 12U;
   r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b0[0U], b0[1U], b0[2U], b0[3U]);
   r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b1[0U], b1[1U], b1[2U], b1[3U]);
   r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b2[0U], b2[1U], b2[2U], b2[3U]);
@@ -484,8 +484,8 @@ Lib_IntVector_Intrinsics_vec128 *Hacl_Blake2s_128_blake2s_malloc(void)
   Lib_IntVector_Intrinsics_vec128
   *buf =
     (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U);
-  memset(buf, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(buf, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
   return buf;
 }
 
diff --git a/src/Hacl_Hash_MD5.c b/src/Hacl_Hash_MD5.c
index 222ac824..8ef87a1e 100644
--- a/src/Hacl_Hash_MD5.c
+++ b/src/Hacl_Hash_MD5.c
@@ -25,34 +25,26 @@
 
 #include "internal/Hacl_Hash_MD5.h"
 
-static uint32_t
-_h0[4U] =
-  { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U };
+static uint32_t _h0[4U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U };
 
 static uint32_t
 _t[64U] =
   {
-    (uint32_t)0xd76aa478U, (uint32_t)0xe8c7b756U, (uint32_t)0x242070dbU, (uint32_t)0xc1bdceeeU,
-    (uint32_t)0xf57c0fafU, (uint32_t)0x4787c62aU, (uint32_t)0xa8304613U, (uint32_t)0xfd469501U,
-    (uint32_t)0x698098d8U, (uint32_t)0x8b44f7afU, (uint32_t)0xffff5bb1U, (uint32_t)0x895cd7beU,
-    (uint32_t)0x6b901122U, (uint32_t)0xfd987193U, (uint32_t)0xa679438eU, (uint32_t)0x49b40821U,
-    (uint32_t)0xf61e2562U, (uint32_t)0xc040b340U, (uint32_t)0x265e5a51U, (uint32_t)0xe9b6c7aaU,
-    (uint32_t)0xd62f105dU, (uint32_t)0x02441453U, (uint32_t)0xd8a1e681U, (uint32_t)0xe7d3fbc8U,
-    (uint32_t)0x21e1cde6U, (uint32_t)0xc33707d6U, (uint32_t)0xf4d50d87U, (uint32_t)0x455a14edU,
-    (uint32_t)0xa9e3e905U, (uint32_t)0xfcefa3f8U, (uint32_t)0x676f02d9U, (uint32_t)0x8d2a4c8aU,
-    (uint32_t)0xfffa3942U, (uint32_t)0x8771f681U, (uint32_t)0x6d9d6122U, (uint32_t)0xfde5380cU,
-    (uint32_t)0xa4beea44U, (uint32_t)0x4bdecfa9U, (uint32_t)0xf6bb4b60U, (uint32_t)0xbebfbc70U,
-    (uint32_t)0x289b7ec6U, (uint32_t)0xeaa127faU, (uint32_t)0xd4ef3085U, (uint32_t)0x4881d05U,
-    (uint32_t)0xd9d4d039U, (uint32_t)0xe6db99e5U, (uint32_t)0x1fa27cf8U, (uint32_t)0xc4ac5665U,
-    (uint32_t)0xf4292244U, (uint32_t)0x432aff97U, (uint32_t)0xab9423a7U, (uint32_t)0xfc93a039U,
-    (uint32_t)0x655b59c3U, (uint32_t)0x8f0ccc92U, (uint32_t)0xffeff47dU, (uint32_t)0x85845dd1U,
-    (uint32_t)0x6fa87e4fU, (uint32_t)0xfe2ce6e0U, (uint32_t)0xa3014314U, (uint32_t)0x4e0811a1U,
-    (uint32_t)0xf7537e82U, (uint32_t)0xbd3af235U, (uint32_t)0x2ad7d2bbU, (uint32_t)0xeb86d391U
+    0xd76aa478U, 0xe8c7b756U, 0x242070dbU, 0xc1bdceeeU, 0xf57c0fafU, 0x4787c62aU, 0xa8304613U,
+    0xfd469501U, 0x698098d8U, 0x8b44f7afU, 0xffff5bb1U, 0x895cd7beU, 0x6b901122U, 0xfd987193U,
+    0xa679438eU, 0x49b40821U, 0xf61e2562U, 0xc040b340U, 0x265e5a51U, 0xe9b6c7aaU, 0xd62f105dU,
+    0x02441453U, 0xd8a1e681U, 0xe7d3fbc8U, 0x21e1cde6U, 0xc33707d6U, 0xf4d50d87U, 0x455a14edU,
+    0xa9e3e905U, 0xfcefa3f8U, 0x676f02d9U, 0x8d2a4c8aU, 0xfffa3942U, 0x8771f681U, 0x6d9d6122U,
+    0xfde5380cU, 0xa4beea44U, 0x4bdecfa9U, 0xf6bb4b60U, 0xbebfbc70U, 0x289b7ec6U, 0xeaa127faU,
+    0xd4ef3085U, 0x4881d05U, 0xd9d4d039U, 0xe6db99e5U, 0x1fa27cf8U, 0xc4ac5665U, 0xf4292244U,
+    0x432aff97U, 0xab9423a7U, 0xfc93a039U, 0x655b59c3U, 0x8f0ccc92U, 0xffeff47dU, 0x85845dd1U,
+    0x6fa87e4fU, 0xfe2ce6e0U, 0xa3014314U, 0x4e0811a1U, 0xf7537e82U, 0xbd3af235U, 0x2ad7d2bbU,
+    0xeb86d391U
   };
 
 void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s)
 {
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, s[i] = _h0[i];);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, s[i] = _h0[i];);
 }
 
 static void legacy_update(uint32_t *abcd, uint8_t *x)
@@ -74,14 +66,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb0
     +
       ((va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0)
-      << (uint32_t)7U
-      | (va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0) >> (uint32_t)25U);
+      << 7U
+      | (va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0) >> 25U);
   abcd[0U] = v;
   uint32_t va0 = abcd[3U];
   uint32_t vb1 = abcd[0U];
   uint32_t vc1 = abcd[1U];
   uint32_t vd1 = abcd[2U];
-  uint8_t *b1 = x + (uint32_t)4U;
+  uint8_t *b1 = x + 4U;
   uint32_t u0 = load32_le(b1);
   uint32_t xk0 = u0;
   uint32_t ti1 = _t[1U];
@@ -90,14 +82,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb1
     +
       ((va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1)
-      << (uint32_t)12U
-      | (va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1) >> (uint32_t)20U);
+      << 12U
+      | (va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1) >> 20U);
   abcd[3U] = v0;
   uint32_t va1 = abcd[2U];
   uint32_t vb2 = abcd[3U];
   uint32_t vc2 = abcd[0U];
   uint32_t vd2 = abcd[1U];
-  uint8_t *b2 = x + (uint32_t)8U;
+  uint8_t *b2 = x + 8U;
   uint32_t u1 = load32_le(b2);
   uint32_t xk1 = u1;
   uint32_t ti2 = _t[2U];
@@ -106,14 +98,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb2
     +
       ((va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2)
-      << (uint32_t)17U
-      | (va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2) >> (uint32_t)15U);
+      << 17U
+      | (va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2) >> 15U);
   abcd[2U] = v1;
   uint32_t va2 = abcd[1U];
   uint32_t vb3 = abcd[2U];
   uint32_t vc3 = abcd[3U];
   uint32_t vd3 = abcd[0U];
-  uint8_t *b3 = x + (uint32_t)12U;
+  uint8_t *b3 = x + 12U;
   uint32_t u2 = load32_le(b3);
   uint32_t xk2 = u2;
   uint32_t ti3 = _t[3U];
@@ -122,14 +114,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb3
     +
       ((va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3)
-      << (uint32_t)22U
-      | (va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3) >> (uint32_t)10U);
+      << 22U
+      | (va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3) >> 10U);
   abcd[1U] = v2;
   uint32_t va3 = abcd[0U];
   uint32_t vb4 = abcd[1U];
   uint32_t vc4 = abcd[2U];
   uint32_t vd4 = abcd[3U];
-  uint8_t *b4 = x + (uint32_t)16U;
+  uint8_t *b4 = x + 16U;
   uint32_t u3 = load32_le(b4);
   uint32_t xk3 = u3;
   uint32_t ti4 = _t[4U];
@@ -138,14 +130,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb4
     +
       ((va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4)
-      << (uint32_t)7U
-      | (va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4) >> (uint32_t)25U);
+      << 7U
+      | (va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4) >> 25U);
   abcd[0U] = v3;
   uint32_t va4 = abcd[3U];
   uint32_t vb5 = abcd[0U];
   uint32_t vc5 = abcd[1U];
   uint32_t vd5 = abcd[2U];
-  uint8_t *b5 = x + (uint32_t)20U;
+  uint8_t *b5 = x + 20U;
   uint32_t u4 = load32_le(b5);
   uint32_t xk4 = u4;
   uint32_t ti5 = _t[5U];
@@ -154,14 +146,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb5
     +
       ((va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5)
-      << (uint32_t)12U
-      | (va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5) >> (uint32_t)20U);
+      << 12U
+      | (va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5) >> 20U);
   abcd[3U] = v4;
   uint32_t va5 = abcd[2U];
   uint32_t vb6 = abcd[3U];
   uint32_t vc6 = abcd[0U];
   uint32_t vd6 = abcd[1U];
-  uint8_t *b6 = x + (uint32_t)24U;
+  uint8_t *b6 = x + 24U;
   uint32_t u5 = load32_le(b6);
   uint32_t xk5 = u5;
   uint32_t ti6 = _t[6U];
@@ -170,14 +162,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb6
     +
       ((va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6)
-      << (uint32_t)17U
-      | (va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6) >> (uint32_t)15U);
+      << 17U
+      | (va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6) >> 15U);
   abcd[2U] = v5;
   uint32_t va6 = abcd[1U];
   uint32_t vb7 = abcd[2U];
   uint32_t vc7 = abcd[3U];
   uint32_t vd7 = abcd[0U];
-  uint8_t *b7 = x + (uint32_t)28U;
+  uint8_t *b7 = x + 28U;
   uint32_t u6 = load32_le(b7);
   uint32_t xk6 = u6;
   uint32_t ti7 = _t[7U];
@@ -186,14 +178,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb7
     +
       ((va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7)
-      << (uint32_t)22U
-      | (va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7) >> (uint32_t)10U);
+      << 22U
+      | (va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7) >> 10U);
   abcd[1U] = v6;
   uint32_t va7 = abcd[0U];
   uint32_t vb8 = abcd[1U];
   uint32_t vc8 = abcd[2U];
   uint32_t vd8 = abcd[3U];
-  uint8_t *b8 = x + (uint32_t)32U;
+  uint8_t *b8 = x + 32U;
   uint32_t u7 = load32_le(b8);
   uint32_t xk7 = u7;
   uint32_t ti8 = _t[8U];
@@ -202,14 +194,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb8
     +
       ((va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8)
-      << (uint32_t)7U
-      | (va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8) >> (uint32_t)25U);
+      << 7U
+      | (va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8) >> 25U);
   abcd[0U] = v7;
   uint32_t va8 = abcd[3U];
   uint32_t vb9 = abcd[0U];
   uint32_t vc9 = abcd[1U];
   uint32_t vd9 = abcd[2U];
-  uint8_t *b9 = x + (uint32_t)36U;
+  uint8_t *b9 = x + 36U;
   uint32_t u8 = load32_le(b9);
   uint32_t xk8 = u8;
   uint32_t ti9 = _t[9U];
@@ -218,14 +210,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb9
     +
       ((va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9)
-      << (uint32_t)12U
-      | (va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9) >> (uint32_t)20U);
+      << 12U
+      | (va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9) >> 20U);
   abcd[3U] = v8;
   uint32_t va9 = abcd[2U];
   uint32_t vb10 = abcd[3U];
   uint32_t vc10 = abcd[0U];
   uint32_t vd10 = abcd[1U];
-  uint8_t *b10 = x + (uint32_t)40U;
+  uint8_t *b10 = x + 40U;
   uint32_t u9 = load32_le(b10);
   uint32_t xk9 = u9;
   uint32_t ti10 = _t[10U];
@@ -234,14 +226,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb10
     +
       ((va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10)
-      << (uint32_t)17U
-      | (va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10) >> (uint32_t)15U);
+      << 17U
+      | (va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10) >> 15U);
   abcd[2U] = v9;
   uint32_t va10 = abcd[1U];
   uint32_t vb11 = abcd[2U];
   uint32_t vc11 = abcd[3U];
   uint32_t vd11 = abcd[0U];
-  uint8_t *b11 = x + (uint32_t)44U;
+  uint8_t *b11 = x + 44U;
   uint32_t u10 = load32_le(b11);
   uint32_t xk10 = u10;
   uint32_t ti11 = _t[11U];
@@ -250,14 +242,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb11
     +
       ((va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11)
-      << (uint32_t)22U
-      | (va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11) >> (uint32_t)10U);
+      << 22U
+      | (va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11) >> 10U);
   abcd[1U] = v10;
   uint32_t va11 = abcd[0U];
   uint32_t vb12 = abcd[1U];
   uint32_t vc12 = abcd[2U];
   uint32_t vd12 = abcd[3U];
-  uint8_t *b12 = x + (uint32_t)48U;
+  uint8_t *b12 = x + 48U;
   uint32_t u11 = load32_le(b12);
   uint32_t xk11 = u11;
   uint32_t ti12 = _t[12U];
@@ -266,14 +258,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb12
     +
       ((va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12)
-      << (uint32_t)7U
-      | (va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12) >> (uint32_t)25U);
+      << 7U
+      | (va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12) >> 25U);
   abcd[0U] = v11;
   uint32_t va12 = abcd[3U];
   uint32_t vb13 = abcd[0U];
   uint32_t vc13 = abcd[1U];
   uint32_t vd13 = abcd[2U];
-  uint8_t *b13 = x + (uint32_t)52U;
+  uint8_t *b13 = x + 52U;
   uint32_t u12 = load32_le(b13);
   uint32_t xk12 = u12;
   uint32_t ti13 = _t[13U];
@@ -282,14 +274,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb13
     +
       ((va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13)
-      << (uint32_t)12U
-      | (va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13) >> (uint32_t)20U);
+      << 12U
+      | (va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13) >> 20U);
   abcd[3U] = v12;
   uint32_t va13 = abcd[2U];
   uint32_t vb14 = abcd[3U];
   uint32_t vc14 = abcd[0U];
   uint32_t vd14 = abcd[1U];
-  uint8_t *b14 = x + (uint32_t)56U;
+  uint8_t *b14 = x + 56U;
   uint32_t u13 = load32_le(b14);
   uint32_t xk13 = u13;
   uint32_t ti14 = _t[14U];
@@ -298,14 +290,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb14
     +
       ((va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14)
-      << (uint32_t)17U
-      | (va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14) >> (uint32_t)15U);
+      << 17U
+      | (va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14) >> 15U);
   abcd[2U] = v13;
   uint32_t va14 = abcd[1U];
   uint32_t vb15 = abcd[2U];
   uint32_t vc15 = abcd[3U];
   uint32_t vd15 = abcd[0U];
-  uint8_t *b15 = x + (uint32_t)60U;
+  uint8_t *b15 = x + 60U;
   uint32_t u14 = load32_le(b15);
   uint32_t xk14 = u14;
   uint32_t ti15 = _t[15U];
@@ -314,14 +306,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb15
     +
       ((va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15)
-      << (uint32_t)22U
-      | (va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15) >> (uint32_t)10U);
+      << 22U
+      | (va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15) >> 10U);
   abcd[1U] = v14;
   uint32_t va15 = abcd[0U];
   uint32_t vb16 = abcd[1U];
   uint32_t vc16 = abcd[2U];
   uint32_t vd16 = abcd[3U];
-  uint8_t *b16 = x + (uint32_t)4U;
+  uint8_t *b16 = x + 4U;
   uint32_t u15 = load32_le(b16);
   uint32_t xk15 = u15;
   uint32_t ti16 = _t[16U];
@@ -330,14 +322,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb16
     +
       ((va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16)
-      << (uint32_t)5U
-      | (va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16) >> (uint32_t)27U);
+      << 5U
+      | (va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16) >> 27U);
   abcd[0U] = v15;
   uint32_t va16 = abcd[3U];
   uint32_t vb17 = abcd[0U];
   uint32_t vc17 = abcd[1U];
   uint32_t vd17 = abcd[2U];
-  uint8_t *b17 = x + (uint32_t)24U;
+  uint8_t *b17 = x + 24U;
   uint32_t u16 = load32_le(b17);
   uint32_t xk16 = u16;
   uint32_t ti17 = _t[17U];
@@ -346,14 +338,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb17
     +
       ((va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17)
-      << (uint32_t)9U
-      | (va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17) >> (uint32_t)23U);
+      << 9U
+      | (va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17) >> 23U);
   abcd[3U] = v16;
   uint32_t va17 = abcd[2U];
   uint32_t vb18 = abcd[3U];
   uint32_t vc18 = abcd[0U];
   uint32_t vd18 = abcd[1U];
-  uint8_t *b18 = x + (uint32_t)44U;
+  uint8_t *b18 = x + 44U;
   uint32_t u17 = load32_le(b18);
   uint32_t xk17 = u17;
   uint32_t ti18 = _t[18U];
@@ -362,8 +354,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb18
     +
       ((va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18)
-      << (uint32_t)14U
-      | (va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18) >> (uint32_t)18U);
+      << 14U
+      | (va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18) >> 18U);
   abcd[2U] = v17;
   uint32_t va18 = abcd[1U];
   uint32_t vb19 = abcd[2U];
@@ -378,14 +370,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb19
     +
       ((va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19)
-      << (uint32_t)20U
-      | (va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19) >> (uint32_t)12U);
+      << 20U
+      | (va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19) >> 12U);
   abcd[1U] = v18;
   uint32_t va19 = abcd[0U];
   uint32_t vb20 = abcd[1U];
   uint32_t vc20 = abcd[2U];
   uint32_t vd20 = abcd[3U];
-  uint8_t *b20 = x + (uint32_t)20U;
+  uint8_t *b20 = x + 20U;
   uint32_t u19 = load32_le(b20);
   uint32_t xk19 = u19;
   uint32_t ti20 = _t[20U];
@@ -394,14 +386,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb20
     +
       ((va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20)
-      << (uint32_t)5U
-      | (va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20) >> (uint32_t)27U);
+      << 5U
+      | (va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20) >> 27U);
   abcd[0U] = v19;
   uint32_t va20 = abcd[3U];
   uint32_t vb21 = abcd[0U];
   uint32_t vc21 = abcd[1U];
   uint32_t vd21 = abcd[2U];
-  uint8_t *b21 = x + (uint32_t)40U;
+  uint8_t *b21 = x + 40U;
   uint32_t u20 = load32_le(b21);
   uint32_t xk20 = u20;
   uint32_t ti21 = _t[21U];
@@ -410,14 +402,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb21
     +
       ((va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21)
-      << (uint32_t)9U
-      | (va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21) >> (uint32_t)23U);
+      << 9U
+      | (va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21) >> 23U);
   abcd[3U] = v20;
   uint32_t va21 = abcd[2U];
   uint32_t vb22 = abcd[3U];
   uint32_t vc22 = abcd[0U];
   uint32_t vd22 = abcd[1U];
-  uint8_t *b22 = x + (uint32_t)60U;
+  uint8_t *b22 = x + 60U;
   uint32_t u21 = load32_le(b22);
   uint32_t xk21 = u21;
   uint32_t ti22 = _t[22U];
@@ -426,14 +418,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb22
     +
       ((va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22)
-      << (uint32_t)14U
-      | (va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22) >> (uint32_t)18U);
+      << 14U
+      | (va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22) >> 18U);
   abcd[2U] = v21;
   uint32_t va22 = abcd[1U];
   uint32_t vb23 = abcd[2U];
   uint32_t vc23 = abcd[3U];
   uint32_t vd23 = abcd[0U];
-  uint8_t *b23 = x + (uint32_t)16U;
+  uint8_t *b23 = x + 16U;
   uint32_t u22 = load32_le(b23);
   uint32_t xk22 = u22;
   uint32_t ti23 = _t[23U];
@@ -442,14 +434,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb23
     +
       ((va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23)
-      << (uint32_t)20U
-      | (va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23) >> (uint32_t)12U);
+      << 20U
+      | (va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23) >> 12U);
   abcd[1U] = v22;
   uint32_t va23 = abcd[0U];
   uint32_t vb24 = abcd[1U];
   uint32_t vc24 = abcd[2U];
   uint32_t vd24 = abcd[3U];
-  uint8_t *b24 = x + (uint32_t)36U;
+  uint8_t *b24 = x + 36U;
   uint32_t u23 = load32_le(b24);
   uint32_t xk23 = u23;
   uint32_t ti24 = _t[24U];
@@ -458,14 +450,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb24
     +
       ((va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24)
-      << (uint32_t)5U
-      | (va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24) >> (uint32_t)27U);
+      << 5U
+      | (va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24) >> 27U);
   abcd[0U] = v23;
   uint32_t va24 = abcd[3U];
   uint32_t vb25 = abcd[0U];
   uint32_t vc25 = abcd[1U];
   uint32_t vd25 = abcd[2U];
-  uint8_t *b25 = x + (uint32_t)56U;
+  uint8_t *b25 = x + 56U;
   uint32_t u24 = load32_le(b25);
   uint32_t xk24 = u24;
   uint32_t ti25 = _t[25U];
@@ -474,14 +466,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb25
     +
       ((va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25)
-      << (uint32_t)9U
-      | (va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25) >> (uint32_t)23U);
+      << 9U
+      | (va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25) >> 23U);
   abcd[3U] = v24;
   uint32_t va25 = abcd[2U];
   uint32_t vb26 = abcd[3U];
   uint32_t vc26 = abcd[0U];
   uint32_t vd26 = abcd[1U];
-  uint8_t *b26 = x + (uint32_t)12U;
+  uint8_t *b26 = x + 12U;
   uint32_t u25 = load32_le(b26);
   uint32_t xk25 = u25;
   uint32_t ti26 = _t[26U];
@@ -490,14 +482,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb26
     +
       ((va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26)
-      << (uint32_t)14U
-      | (va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26) >> (uint32_t)18U);
+      << 14U
+      | (va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26) >> 18U);
   abcd[2U] = v25;
   uint32_t va26 = abcd[1U];
   uint32_t vb27 = abcd[2U];
   uint32_t vc27 = abcd[3U];
   uint32_t vd27 = abcd[0U];
-  uint8_t *b27 = x + (uint32_t)32U;
+  uint8_t *b27 = x + 32U;
   uint32_t u26 = load32_le(b27);
   uint32_t xk26 = u26;
   uint32_t ti27 = _t[27U];
@@ -506,14 +498,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb27
     +
       ((va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27)
-      << (uint32_t)20U
-      | (va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27) >> (uint32_t)12U);
+      << 20U
+      | (va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27) >> 12U);
   abcd[1U] = v26;
   uint32_t va27 = abcd[0U];
   uint32_t vb28 = abcd[1U];
   uint32_t vc28 = abcd[2U];
   uint32_t vd28 = abcd[3U];
-  uint8_t *b28 = x + (uint32_t)52U;
+  uint8_t *b28 = x + 52U;
   uint32_t u27 = load32_le(b28);
   uint32_t xk27 = u27;
   uint32_t ti28 = _t[28U];
@@ -522,14 +514,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb28
     +
       ((va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28)
-      << (uint32_t)5U
-      | (va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28) >> (uint32_t)27U);
+      << 5U
+      | (va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28) >> 27U);
   abcd[0U] = v27;
   uint32_t va28 = abcd[3U];
   uint32_t vb29 = abcd[0U];
   uint32_t vc29 = abcd[1U];
   uint32_t vd29 = abcd[2U];
-  uint8_t *b29 = x + (uint32_t)8U;
+  uint8_t *b29 = x + 8U;
   uint32_t u28 = load32_le(b29);
   uint32_t xk28 = u28;
   uint32_t ti29 = _t[29U];
@@ -538,14 +530,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb29
     +
       ((va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29)
-      << (uint32_t)9U
-      | (va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29) >> (uint32_t)23U);
+      << 9U
+      | (va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29) >> 23U);
   abcd[3U] = v28;
   uint32_t va29 = abcd[2U];
   uint32_t vb30 = abcd[3U];
   uint32_t vc30 = abcd[0U];
   uint32_t vd30 = abcd[1U];
-  uint8_t *b30 = x + (uint32_t)28U;
+  uint8_t *b30 = x + 28U;
   uint32_t u29 = load32_le(b30);
   uint32_t xk29 = u29;
   uint32_t ti30 = _t[30U];
@@ -554,14 +546,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb30
     +
       ((va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30)
-      << (uint32_t)14U
-      | (va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30) >> (uint32_t)18U);
+      << 14U
+      | (va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30) >> 18U);
   abcd[2U] = v29;
   uint32_t va30 = abcd[1U];
   uint32_t vb31 = abcd[2U];
   uint32_t vc31 = abcd[3U];
   uint32_t vd31 = abcd[0U];
-  uint8_t *b31 = x + (uint32_t)48U;
+  uint8_t *b31 = x + 48U;
   uint32_t u30 = load32_le(b31);
   uint32_t xk30 = u30;
   uint32_t ti31 = _t[31U];
@@ -570,14 +562,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb31
     +
       ((va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31)
-      << (uint32_t)20U
-      | (va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31) >> (uint32_t)12U);
+      << 20U
+      | (va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31) >> 12U);
   abcd[1U] = v30;
   uint32_t va31 = abcd[0U];
   uint32_t vb32 = abcd[1U];
   uint32_t vc32 = abcd[2U];
   uint32_t vd32 = abcd[3U];
-  uint8_t *b32 = x + (uint32_t)20U;
+  uint8_t *b32 = x + 20U;
   uint32_t u31 = load32_le(b32);
   uint32_t xk31 = u31;
   uint32_t ti32 = _t[32U];
@@ -586,14 +578,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb32
     +
       ((va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32)
-      << (uint32_t)4U
-      | (va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32) >> (uint32_t)28U);
+      << 4U
+      | (va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32) >> 28U);
   abcd[0U] = v31;
   uint32_t va32 = abcd[3U];
   uint32_t vb33 = abcd[0U];
   uint32_t vc33 = abcd[1U];
   uint32_t vd33 = abcd[2U];
-  uint8_t *b33 = x + (uint32_t)32U;
+  uint8_t *b33 = x + 32U;
   uint32_t u32 = load32_le(b33);
   uint32_t xk32 = u32;
   uint32_t ti33 = _t[33U];
@@ -602,14 +594,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb33
     +
       ((va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33)
-      << (uint32_t)11U
-      | (va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33) >> (uint32_t)21U);
+      << 11U
+      | (va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33) >> 21U);
   abcd[3U] = v32;
   uint32_t va33 = abcd[2U];
   uint32_t vb34 = abcd[3U];
   uint32_t vc34 = abcd[0U];
   uint32_t vd34 = abcd[1U];
-  uint8_t *b34 = x + (uint32_t)44U;
+  uint8_t *b34 = x + 44U;
   uint32_t u33 = load32_le(b34);
   uint32_t xk33 = u33;
   uint32_t ti34 = _t[34U];
@@ -618,14 +610,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb34
     +
       ((va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34)
-      << (uint32_t)16U
-      | (va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34) >> (uint32_t)16U);
+      << 16U
+      | (va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34) >> 16U);
   abcd[2U] = v33;
   uint32_t va34 = abcd[1U];
   uint32_t vb35 = abcd[2U];
   uint32_t vc35 = abcd[3U];
   uint32_t vd35 = abcd[0U];
-  uint8_t *b35 = x + (uint32_t)56U;
+  uint8_t *b35 = x + 56U;
   uint32_t u34 = load32_le(b35);
   uint32_t xk34 = u34;
   uint32_t ti35 = _t[35U];
@@ -634,14 +626,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb35
     +
       ((va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35)
-      << (uint32_t)23U
-      | (va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35) >> (uint32_t)9U);
+      << 23U
+      | (va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35) >> 9U);
   abcd[1U] = v34;
   uint32_t va35 = abcd[0U];
   uint32_t vb36 = abcd[1U];
   uint32_t vc36 = abcd[2U];
   uint32_t vd36 = abcd[3U];
-  uint8_t *b36 = x + (uint32_t)4U;
+  uint8_t *b36 = x + 4U;
   uint32_t u35 = load32_le(b36);
   uint32_t xk35 = u35;
   uint32_t ti36 = _t[36U];
@@ -650,14 +642,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb36
     +
       ((va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36)
-      << (uint32_t)4U
-      | (va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36) >> (uint32_t)28U);
+      << 4U
+      | (va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36) >> 28U);
   abcd[0U] = v35;
   uint32_t va36 = abcd[3U];
   uint32_t vb37 = abcd[0U];
   uint32_t vc37 = abcd[1U];
   uint32_t vd37 = abcd[2U];
-  uint8_t *b37 = x + (uint32_t)16U;
+  uint8_t *b37 = x + 16U;
   uint32_t u36 = load32_le(b37);
   uint32_t xk36 = u36;
   uint32_t ti37 = _t[37U];
@@ -666,14 +658,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb37
     +
       ((va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37)
-      << (uint32_t)11U
-      | (va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37) >> (uint32_t)21U);
+      << 11U
+      | (va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37) >> 21U);
   abcd[3U] = v36;
   uint32_t va37 = abcd[2U];
   uint32_t vb38 = abcd[3U];
   uint32_t vc38 = abcd[0U];
   uint32_t vd38 = abcd[1U];
-  uint8_t *b38 = x + (uint32_t)28U;
+  uint8_t *b38 = x + 28U;
   uint32_t u37 = load32_le(b38);
   uint32_t xk37 = u37;
   uint32_t ti38 = _t[38U];
@@ -682,14 +674,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb38
     +
       ((va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38)
-      << (uint32_t)16U
-      | (va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38) >> (uint32_t)16U);
+      << 16U
+      | (va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38) >> 16U);
   abcd[2U] = v37;
   uint32_t va38 = abcd[1U];
   uint32_t vb39 = abcd[2U];
   uint32_t vc39 = abcd[3U];
   uint32_t vd39 = abcd[0U];
-  uint8_t *b39 = x + (uint32_t)40U;
+  uint8_t *b39 = x + 40U;
   uint32_t u38 = load32_le(b39);
   uint32_t xk38 = u38;
   uint32_t ti39 = _t[39U];
@@ -698,14 +690,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb39
     +
       ((va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39)
-      << (uint32_t)23U
-      | (va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39) >> (uint32_t)9U);
+      << 23U
+      | (va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39) >> 9U);
   abcd[1U] = v38;
   uint32_t va39 = abcd[0U];
   uint32_t vb40 = abcd[1U];
   uint32_t vc40 = abcd[2U];
   uint32_t vd40 = abcd[3U];
-  uint8_t *b40 = x + (uint32_t)52U;
+  uint8_t *b40 = x + 52U;
   uint32_t u39 = load32_le(b40);
   uint32_t xk39 = u39;
   uint32_t ti40 = _t[40U];
@@ -714,8 +706,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb40
     +
       ((va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40)
-      << (uint32_t)4U
-      | (va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40) >> (uint32_t)28U);
+      << 4U
+      | (va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40) >> 28U);
   abcd[0U] = v39;
   uint32_t va40 = abcd[3U];
   uint32_t vb41 = abcd[0U];
@@ -730,14 +722,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb41
     +
       ((va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41)
-      << (uint32_t)11U
-      | (va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41) >> (uint32_t)21U);
+      << 11U
+      | (va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41) >> 21U);
   abcd[3U] = v40;
   uint32_t va41 = abcd[2U];
   uint32_t vb42 = abcd[3U];
   uint32_t vc42 = abcd[0U];
   uint32_t vd42 = abcd[1U];
-  uint8_t *b42 = x + (uint32_t)12U;
+  uint8_t *b42 = x + 12U;
   uint32_t u41 = load32_le(b42);
   uint32_t xk41 = u41;
   uint32_t ti42 = _t[42U];
@@ -746,14 +738,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb42
     +
       ((va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42)
-      << (uint32_t)16U
-      | (va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42) >> (uint32_t)16U);
+      << 16U
+      | (va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42) >> 16U);
   abcd[2U] = v41;
   uint32_t va42 = abcd[1U];
   uint32_t vb43 = abcd[2U];
   uint32_t vc43 = abcd[3U];
   uint32_t vd43 = abcd[0U];
-  uint8_t *b43 = x + (uint32_t)24U;
+  uint8_t *b43 = x + 24U;
   uint32_t u42 = load32_le(b43);
   uint32_t xk42 = u42;
   uint32_t ti43 = _t[43U];
@@ -762,14 +754,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb43
     +
       ((va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43)
-      << (uint32_t)23U
-      | (va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43) >> (uint32_t)9U);
+      << 23U
+      | (va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43) >> 9U);
   abcd[1U] = v42;
   uint32_t va43 = abcd[0U];
   uint32_t vb44 = abcd[1U];
   uint32_t vc44 = abcd[2U];
   uint32_t vd44 = abcd[3U];
-  uint8_t *b44 = x + (uint32_t)36U;
+  uint8_t *b44 = x + 36U;
   uint32_t u43 = load32_le(b44);
   uint32_t xk43 = u43;
   uint32_t ti44 = _t[44U];
@@ -778,14 +770,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb44
     +
       ((va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44)
-      << (uint32_t)4U
-      | (va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44) >> (uint32_t)28U);
+      << 4U
+      | (va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44) >> 28U);
   abcd[0U] = v43;
   uint32_t va44 = abcd[3U];
   uint32_t vb45 = abcd[0U];
   uint32_t vc45 = abcd[1U];
   uint32_t vd45 = abcd[2U];
-  uint8_t *b45 = x + (uint32_t)48U;
+  uint8_t *b45 = x + 48U;
   uint32_t u44 = load32_le(b45);
   uint32_t xk44 = u44;
   uint32_t ti45 = _t[45U];
@@ -794,14 +786,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb45
     +
       ((va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45)
-      << (uint32_t)11U
-      | (va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45) >> (uint32_t)21U);
+      << 11U
+      | (va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45) >> 21U);
   abcd[3U] = v44;
   uint32_t va45 = abcd[2U];
   uint32_t vb46 = abcd[3U];
   uint32_t vc46 = abcd[0U];
   uint32_t vd46 = abcd[1U];
-  uint8_t *b46 = x + (uint32_t)60U;
+  uint8_t *b46 = x + 60U;
   uint32_t u45 = load32_le(b46);
   uint32_t xk45 = u45;
   uint32_t ti46 = _t[46U];
@@ -810,14 +802,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb46
     +
       ((va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46)
-      << (uint32_t)16U
-      | (va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46) >> (uint32_t)16U);
+      << 16U
+      | (va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46) >> 16U);
   abcd[2U] = v45;
   uint32_t va46 = abcd[1U];
   uint32_t vb47 = abcd[2U];
   uint32_t vc47 = abcd[3U];
   uint32_t vd47 = abcd[0U];
-  uint8_t *b47 = x + (uint32_t)8U;
+  uint8_t *b47 = x + 8U;
   uint32_t u46 = load32_le(b47);
   uint32_t xk46 = u46;
   uint32_t ti47 = _t[47U];
@@ -826,8 +818,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb47
     +
       ((va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47)
-      << (uint32_t)23U
-      | (va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47) >> (uint32_t)9U);
+      << 23U
+      | (va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47) >> 9U);
   abcd[1U] = v46;
   uint32_t va47 = abcd[0U];
   uint32_t vb48 = abcd[1U];
@@ -842,14 +834,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb48
     +
       ((va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48)
-      << (uint32_t)6U
-      | (va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48) >> (uint32_t)26U);
+      << 6U
+      | (va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48) >> 26U);
   abcd[0U] = v47;
   uint32_t va48 = abcd[3U];
   uint32_t vb49 = abcd[0U];
   uint32_t vc49 = abcd[1U];
   uint32_t vd49 = abcd[2U];
-  uint8_t *b49 = x + (uint32_t)28U;
+  uint8_t *b49 = x + 28U;
   uint32_t u48 = load32_le(b49);
   uint32_t xk48 = u48;
   uint32_t ti49 = _t[49U];
@@ -858,14 +850,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb49
     +
       ((va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49)
-      << (uint32_t)10U
-      | (va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49) >> (uint32_t)22U);
+      << 10U
+      | (va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49) >> 22U);
   abcd[3U] = v48;
   uint32_t va49 = abcd[2U];
   uint32_t vb50 = abcd[3U];
   uint32_t vc50 = abcd[0U];
   uint32_t vd50 = abcd[1U];
-  uint8_t *b50 = x + (uint32_t)56U;
+  uint8_t *b50 = x + 56U;
   uint32_t u49 = load32_le(b50);
   uint32_t xk49 = u49;
   uint32_t ti50 = _t[50U];
@@ -874,14 +866,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb50
     +
       ((va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50)
-      << (uint32_t)15U
-      | (va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50) >> (uint32_t)17U);
+      << 15U
+      | (va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50) >> 17U);
   abcd[2U] = v49;
   uint32_t va50 = abcd[1U];
   uint32_t vb51 = abcd[2U];
   uint32_t vc51 = abcd[3U];
   uint32_t vd51 = abcd[0U];
-  uint8_t *b51 = x + (uint32_t)20U;
+  uint8_t *b51 = x + 20U;
   uint32_t u50 = load32_le(b51);
   uint32_t xk50 = u50;
   uint32_t ti51 = _t[51U];
@@ -890,14 +882,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb51
     +
       ((va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51)
-      << (uint32_t)21U
-      | (va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51) >> (uint32_t)11U);
+      << 21U
+      | (va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51) >> 11U);
   abcd[1U] = v50;
   uint32_t va51 = abcd[0U];
   uint32_t vb52 = abcd[1U];
   uint32_t vc52 = abcd[2U];
   uint32_t vd52 = abcd[3U];
-  uint8_t *b52 = x + (uint32_t)48U;
+  uint8_t *b52 = x + 48U;
   uint32_t u51 = load32_le(b52);
   uint32_t xk51 = u51;
   uint32_t ti52 = _t[52U];
@@ -906,14 +898,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb52
     +
       ((va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52)
-      << (uint32_t)6U
-      | (va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52) >> (uint32_t)26U);
+      << 6U
+      | (va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52) >> 26U);
   abcd[0U] = v51;
   uint32_t va52 = abcd[3U];
   uint32_t vb53 = abcd[0U];
   uint32_t vc53 = abcd[1U];
   uint32_t vd53 = abcd[2U];
-  uint8_t *b53 = x + (uint32_t)12U;
+  uint8_t *b53 = x + 12U;
   uint32_t u52 = load32_le(b53);
   uint32_t xk52 = u52;
   uint32_t ti53 = _t[53U];
@@ -922,14 +914,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb53
     +
       ((va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53)
-      << (uint32_t)10U
-      | (va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53) >> (uint32_t)22U);
+      << 10U
+      | (va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53) >> 22U);
   abcd[3U] = v52;
   uint32_t va53 = abcd[2U];
   uint32_t vb54 = abcd[3U];
   uint32_t vc54 = abcd[0U];
   uint32_t vd54 = abcd[1U];
-  uint8_t *b54 = x + (uint32_t)40U;
+  uint8_t *b54 = x + 40U;
   uint32_t u53 = load32_le(b54);
   uint32_t xk53 = u53;
   uint32_t ti54 = _t[54U];
@@ -938,14 +930,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb54
     +
       ((va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54)
-      << (uint32_t)15U
-      | (va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54) >> (uint32_t)17U);
+      << 15U
+      | (va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54) >> 17U);
   abcd[2U] = v53;
   uint32_t va54 = abcd[1U];
   uint32_t vb55 = abcd[2U];
   uint32_t vc55 = abcd[3U];
   uint32_t vd55 = abcd[0U];
-  uint8_t *b55 = x + (uint32_t)4U;
+  uint8_t *b55 = x + 4U;
   uint32_t u54 = load32_le(b55);
   uint32_t xk54 = u54;
   uint32_t ti55 = _t[55U];
@@ -954,14 +946,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb55
     +
       ((va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55)
-      << (uint32_t)21U
-      | (va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55) >> (uint32_t)11U);
+      << 21U
+      | (va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55) >> 11U);
   abcd[1U] = v54;
   uint32_t va55 = abcd[0U];
   uint32_t vb56 = abcd[1U];
   uint32_t vc56 = abcd[2U];
   uint32_t vd56 = abcd[3U];
-  uint8_t *b56 = x + (uint32_t)32U;
+  uint8_t *b56 = x + 32U;
   uint32_t u55 = load32_le(b56);
   uint32_t xk55 = u55;
   uint32_t ti56 = _t[56U];
@@ -970,14 +962,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb56
     +
       ((va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56)
-      << (uint32_t)6U
-      | (va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56) >> (uint32_t)26U);
+      << 6U
+      | (va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56) >> 26U);
   abcd[0U] = v55;
   uint32_t va56 = abcd[3U];
   uint32_t vb57 = abcd[0U];
   uint32_t vc57 = abcd[1U];
   uint32_t vd57 = abcd[2U];
-  uint8_t *b57 = x + (uint32_t)60U;
+  uint8_t *b57 = x + 60U;
   uint32_t u56 = load32_le(b57);
   uint32_t xk56 = u56;
   uint32_t ti57 = _t[57U];
@@ -986,14 +978,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb57
     +
       ((va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57)
-      << (uint32_t)10U
-      | (va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57) >> (uint32_t)22U);
+      << 10U
+      | (va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57) >> 22U);
   abcd[3U] = v56;
   uint32_t va57 = abcd[2U];
   uint32_t vb58 = abcd[3U];
   uint32_t vc58 = abcd[0U];
   uint32_t vd58 = abcd[1U];
-  uint8_t *b58 = x + (uint32_t)24U;
+  uint8_t *b58 = x + 24U;
   uint32_t u57 = load32_le(b58);
   uint32_t xk57 = u57;
   uint32_t ti58 = _t[58U];
@@ -1002,14 +994,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb58
     +
       ((va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58)
-      << (uint32_t)15U
-      | (va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58) >> (uint32_t)17U);
+      << 15U
+      | (va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58) >> 17U);
   abcd[2U] = v57;
   uint32_t va58 = abcd[1U];
   uint32_t vb59 = abcd[2U];
   uint32_t vc59 = abcd[3U];
   uint32_t vd59 = abcd[0U];
-  uint8_t *b59 = x + (uint32_t)52U;
+  uint8_t *b59 = x + 52U;
   uint32_t u58 = load32_le(b59);
   uint32_t xk58 = u58;
   uint32_t ti59 = _t[59U];
@@ -1018,14 +1010,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb59
     +
       ((va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59)
-      << (uint32_t)21U
-      | (va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59) >> (uint32_t)11U);
+      << 21U
+      | (va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59) >> 11U);
   abcd[1U] = v58;
   uint32_t va59 = abcd[0U];
   uint32_t vb60 = abcd[1U];
   uint32_t vc60 = abcd[2U];
   uint32_t vd60 = abcd[3U];
-  uint8_t *b60 = x + (uint32_t)16U;
+  uint8_t *b60 = x + 16U;
   uint32_t u59 = load32_le(b60);
   uint32_t xk59 = u59;
   uint32_t ti60 = _t[60U];
@@ -1034,14 +1026,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb60
     +
       ((va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60)
-      << (uint32_t)6U
-      | (va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60) >> (uint32_t)26U);
+      << 6U
+      | (va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60) >> 26U);
   abcd[0U] = v59;
   uint32_t va60 = abcd[3U];
   uint32_t vb61 = abcd[0U];
   uint32_t vc61 = abcd[1U];
   uint32_t vd61 = abcd[2U];
-  uint8_t *b61 = x + (uint32_t)44U;
+  uint8_t *b61 = x + 44U;
   uint32_t u60 = load32_le(b61);
   uint32_t xk60 = u60;
   uint32_t ti61 = _t[61U];
@@ -1050,14 +1042,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb61
     +
       ((va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61)
-      << (uint32_t)10U
-      | (va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61) >> (uint32_t)22U);
+      << 10U
+      | (va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61) >> 22U);
   abcd[3U] = v60;
   uint32_t va61 = abcd[2U];
   uint32_t vb62 = abcd[3U];
   uint32_t vc62 = abcd[0U];
   uint32_t vd62 = abcd[1U];
-  uint8_t *b62 = x + (uint32_t)8U;
+  uint8_t *b62 = x + 8U;
   uint32_t u61 = load32_le(b62);
   uint32_t xk61 = u61;
   uint32_t ti62 = _t[62U];
@@ -1066,14 +1058,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb62
     +
       ((va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62)
-      << (uint32_t)15U
-      | (va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62) >> (uint32_t)17U);
+      << 15U
+      | (va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62) >> 17U);
   abcd[2U] = v61;
   uint32_t va62 = abcd[1U];
   uint32_t vb = abcd[2U];
   uint32_t vc = abcd[3U];
   uint32_t vd = abcd[0U];
-  uint8_t *b63 = x + (uint32_t)36U;
+  uint8_t *b63 = x + 36U;
   uint32_t u62 = load32_le(b63);
   uint32_t xk62 = u62;
   uint32_t ti = _t[63U];
@@ -1082,8 +1074,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb
     +
       ((va62 + (vc ^ (vb | ~vd)) + xk62 + ti)
-      << (uint32_t)21U
-      | (va62 + (vc ^ (vb | ~vd)) + xk62 + ti) >> (uint32_t)11U);
+      << 21U
+      | (va62 + (vc ^ (vb | ~vd)) + xk62 + ti) >> 11U);
   abcd[1U] = v62;
   uint32_t a = abcd[0U];
   uint32_t b = abcd[1U];
@@ -1098,42 +1090,26 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
 static void legacy_pad(uint64_t len, uint8_t *dst)
 {
   uint8_t *dst1 = dst;
-  dst1[0U] = (uint8_t)0x80U;
-  uint8_t *dst2 = dst + (uint32_t)1U;
-  for
-  (uint32_t
-    i = (uint32_t)0U;
-    i
-    < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
-    i++)
+  dst1[0U] = 0x80U;
+  uint8_t *dst2 = dst + 1U;
+  for (uint32_t i = 0U; i < (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U; i++)
   {
-    dst2[i] = (uint8_t)0U;
+    dst2[i] = 0U;
   }
-  uint8_t
-  *dst3 =
-    dst
-    +
-      (uint32_t)1U
-      +
-        ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U)))
-        % (uint32_t)64U;
-  store64_le(dst3, len << (uint32_t)3U);
+  uint8_t *dst3 = dst + 1U + (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U;
+  store64_le(dst3, len << 3U);
 }
 
 void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst)
 {
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store32_le(dst + i * (uint32_t)4U, s[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(dst + i * 4U, s[i]););
 }
 
 void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
 {
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
-    uint32_t sz = (uint32_t)64U;
+    uint32_t sz = 64U;
     uint8_t *block = blocks + sz * i;
     legacy_update(s, block);
   }
@@ -1147,20 +1123,14 @@ Hacl_Hash_MD5_legacy_update_last(
   uint32_t input_len
 )
 {
-  uint32_t blocks_n = input_len / (uint32_t)64U;
-  uint32_t blocks_len = blocks_n * (uint32_t)64U;
+  uint32_t blocks_n = input_len / 64U;
+  uint32_t blocks_len = blocks_n * 64U;
   uint8_t *blocks = input;
   uint32_t rest_len = input_len - blocks_len;
   uint8_t *rest = input + blocks_len;
   Hacl_Hash_MD5_legacy_update_multi(s, blocks, blocks_n);
   uint64_t total_input_len = prev_len + (uint64_t)input_len;
-  uint32_t
-  pad_len =
-    (uint32_t)1U
-    +
-      ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U)))
-      % (uint32_t)64U
-    + (uint32_t)8U;
+  uint32_t pad_len = 1U + (128U - (9U + (uint32_t)(total_input_len % (uint64_t)64U))) % 64U + 8U;
   uint32_t tmp_len = rest_len + pad_len;
   uint8_t tmp_twoblocks[128U] = { 0U };
   uint8_t *tmp = tmp_twoblocks;
@@ -1168,25 +1138,23 @@ Hacl_Hash_MD5_legacy_update_last(
   uint8_t *tmp_pad = tmp + rest_len;
   memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
   legacy_pad(total_input_len, tmp_pad);
-  Hacl_Hash_MD5_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
+  Hacl_Hash_MD5_legacy_update_multi(s, tmp, tmp_len / 64U);
 }
 
 void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
 {
-  uint32_t
-  s[4U] =
-    { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U };
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t s[4U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U };
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -1202,10 +1170,10 @@ void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
 
 Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_create_in(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
@@ -1220,7 +1188,7 @@ void Hacl_Streaming_MD5_legacy_init(Hacl_Streaming_MD_state_32 *s)
   uint32_t *block_state = scrut.block_state;
   Hacl_Hash_Core_MD5_legacy_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -1232,33 +1200,33 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
 {
   Hacl_Streaming_MD_state_32 s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+  if ((uint64_t)len > 2305843009213693951ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (len <= 64U - sz)
   {
     Hacl_Streaming_MD_state_32 s1 = *p;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -1273,40 +1241,40 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_MD_state_32 s1 = *p;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)64U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
-    Hacl_Hash_MD5_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
+    Hacl_Hash_MD5_legacy_update_multi(block_state1, data1, data1_len / 64U);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
     *p
@@ -1321,7 +1289,7 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
+    uint32_t diff = 64U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_MD_state_32 s1 = *p;
@@ -1329,13 +1297,13 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -1354,39 +1322,33 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
-    Hacl_Hash_MD5_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
+    Hacl_Hash_MD5_legacy_update_multi(block_state1, data11, data1_len / 64U);
     uint8_t *dst = buf;
     memcpy(dst, data21, data2_len * sizeof (uint8_t));
     *p
@@ -1409,29 +1371,29 @@ void Hacl_Streaming_MD5_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *ds
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[4U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)4U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 4U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_Hash_MD5_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
+  Hacl_Hash_MD5_legacy_update_multi(tmp_block_state, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Hacl_Hash_MD5_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
   Hacl_Hash_Core_MD5_legacy_finish(tmp_block_state, dst);
@@ -1453,10 +1415,10 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_sta
   uint32_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
-  memcpy(block_state, block_state0, (uint32_t)4U * sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
+  memcpy(block_state, block_state0, 4U * sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_32
diff --git a/src/Hacl_Hash_SHA1.c b/src/Hacl_Hash_SHA1.c
index 5ecb3c0b..61509182 100644
--- a/src/Hacl_Hash_SHA1.c
+++ b/src/Hacl_Hash_SHA1.c
@@ -25,16 +25,11 @@
 
 #include "internal/Hacl_Hash_SHA1.h"
 
-static uint32_t
-_h0[5U] =
-  {
-    (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-    (uint32_t)0xc3d2e1f0U
-  };
+static uint32_t _h0[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
 
 void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s)
 {
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, s[i] = _h0[i];);
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i] = _h0[i];);
 }
 
 static void legacy_update(uint32_t *h, uint8_t *l)
@@ -45,29 +40,26 @@ static void legacy_update(uint32_t *h, uint8_t *l)
   uint32_t hd = h[3U];
   uint32_t he = h[4U];
   uint32_t _w[80U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+  for (uint32_t i = 0U; i < 80U; i++)
   {
     uint32_t v;
-    if (i < (uint32_t)16U)
+    if (i < 16U)
     {
-      uint8_t *b = l + i * (uint32_t)4U;
+      uint8_t *b = l + i * 4U;
       uint32_t u = load32_be(b);
       v = u;
     }
     else
     {
-      uint32_t wmit3 = _w[i - (uint32_t)3U];
-      uint32_t wmit8 = _w[i - (uint32_t)8U];
-      uint32_t wmit14 = _w[i - (uint32_t)14U];
-      uint32_t wmit16 = _w[i - (uint32_t)16U];
-      v =
-        (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16)))
-        << (uint32_t)1U
-        | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> (uint32_t)31U;
+      uint32_t wmit3 = _w[i - 3U];
+      uint32_t wmit8 = _w[i - 8U];
+      uint32_t wmit14 = _w[i - 14U];
+      uint32_t wmit16 = _w[i - 16U];
+      v = (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) << 1U | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> 31U;
     }
     _w[i] = v;
   }
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+  for (uint32_t i = 0U; i < 80U; i++)
   {
     uint32_t _a = h[0U];
     uint32_t _b = h[1U];
@@ -76,11 +68,11 @@ static void legacy_update(uint32_t *h, uint8_t *l)
     uint32_t _e = h[4U];
     uint32_t wmit = _w[i];
     uint32_t ite0;
-    if (i < (uint32_t)20U)
+    if (i < 20U)
     {
       ite0 = (_b & _c) ^ (~_b & _d);
     }
-    else if ((uint32_t)39U < i && i < (uint32_t)60U)
+    else if (39U < i && i < 60U)
     {
       ite0 = (_b & _c) ^ ((_b & _d) ^ (_c & _d));
     }
@@ -89,32 +81,32 @@ static void legacy_update(uint32_t *h, uint8_t *l)
       ite0 = _b ^ (_c ^ _d);
     }
     uint32_t ite;
-    if (i < (uint32_t)20U)
+    if (i < 20U)
     {
-      ite = (uint32_t)0x5a827999U;
+      ite = 0x5a827999U;
     }
-    else if (i < (uint32_t)40U)
+    else if (i < 40U)
     {
-      ite = (uint32_t)0x6ed9eba1U;
+      ite = 0x6ed9eba1U;
     }
-    else if (i < (uint32_t)60U)
+    else if (i < 60U)
     {
-      ite = (uint32_t)0x8f1bbcdcU;
+      ite = 0x8f1bbcdcU;
     }
     else
     {
-      ite = (uint32_t)0xca62c1d6U;
+      ite = 0xca62c1d6U;
     }
-    uint32_t _T = (_a << (uint32_t)5U | _a >> (uint32_t)27U) + ite0 + _e + ite + wmit;
+    uint32_t _T = (_a << 5U | _a >> 27U) + ite0 + _e + ite + wmit;
     h[0U] = _T;
     h[1U] = _a;
-    h[2U] = _b << (uint32_t)30U | _b >> (uint32_t)2U;
+    h[2U] = _b << 30U | _b >> 2U;
     h[3U] = _c;
     h[4U] = _d;
   }
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+  for (uint32_t i = 0U; i < 80U; i++)
   {
-    _w[i] = (uint32_t)0U;
+    _w[i] = 0U;
   }
   uint32_t sta = h[0U];
   uint32_t stb = h[1U];
@@ -131,42 +123,26 @@ static void legacy_update(uint32_t *h, uint8_t *l)
 static void legacy_pad(uint64_t len, uint8_t *dst)
 {
   uint8_t *dst1 = dst;
-  dst1[0U] = (uint8_t)0x80U;
-  uint8_t *dst2 = dst + (uint32_t)1U;
-  for
-  (uint32_t
-    i = (uint32_t)0U;
-    i
-    < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
-    i++)
+  dst1[0U] = 0x80U;
+  uint8_t *dst2 = dst + 1U;
+  for (uint32_t i = 0U; i < (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U; i++)
   {
-    dst2[i] = (uint8_t)0U;
+    dst2[i] = 0U;
   }
-  uint8_t
-  *dst3 =
-    dst
-    +
-      (uint32_t)1U
-      +
-        ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U)))
-        % (uint32_t)64U;
-  store64_be(dst3, len << (uint32_t)3U);
+  uint8_t *dst3 = dst + 1U + (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U;
+  store64_be(dst3, len << 3U);
 }
 
 void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst)
 {
-  KRML_MAYBE_FOR5(i,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
-    store32_be(dst + i * (uint32_t)4U, s[i]););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, store32_be(dst + i * 4U, s[i]););
 }
 
 void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
 {
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
-    uint32_t sz = (uint32_t)64U;
+    uint32_t sz = 64U;
     uint8_t *block = blocks + sz * i;
     legacy_update(s, block);
   }
@@ -180,20 +156,14 @@ Hacl_Hash_SHA1_legacy_update_last(
   uint32_t input_len
 )
 {
-  uint32_t blocks_n = input_len / (uint32_t)64U;
-  uint32_t blocks_len = blocks_n * (uint32_t)64U;
+  uint32_t blocks_n = input_len / 64U;
+  uint32_t blocks_len = blocks_n * 64U;
   uint8_t *blocks = input;
   uint32_t rest_len = input_len - blocks_len;
   uint8_t *rest = input + blocks_len;
   Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n);
   uint64_t total_input_len = prev_len + (uint64_t)input_len;
-  uint32_t
-  pad_len =
-    (uint32_t)1U
-    +
-      ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U)))
-      % (uint32_t)64U
-    + (uint32_t)8U;
+  uint32_t pad_len = 1U + (128U - (9U + (uint32_t)(total_input_len % (uint64_t)64U))) % 64U + 8U;
   uint32_t tmp_len = rest_len + pad_len;
   uint8_t tmp_twoblocks[128U] = { 0U };
   uint8_t *tmp = tmp_twoblocks;
@@ -201,28 +171,23 @@ Hacl_Hash_SHA1_legacy_update_last(
   uint8_t *tmp_pad = tmp + rest_len;
   memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
   legacy_pad(total_input_len, tmp_pad);
-  Hacl_Hash_SHA1_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
+  Hacl_Hash_SHA1_legacy_update_multi(s, tmp, tmp_len / 64U);
 }
 
 void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
 {
-  uint32_t
-  s[5U] =
-    {
-      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-      (uint32_t)0xc3d2e1f0U
-    };
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -238,10 +203,10 @@ void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst
 
 Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_create_in(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
@@ -256,7 +221,7 @@ void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s)
   uint32_t *block_state = scrut.block_state;
   Hacl_Hash_Core_SHA1_legacy_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -268,33 +233,33 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
 {
   Hacl_Streaming_MD_state_32 s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+  if ((uint64_t)len > 2305843009213693951ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (len <= 64U - sz)
   {
     Hacl_Streaming_MD_state_32 s1 = *p;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -309,40 +274,40 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_MD_state_32 s1 = *p;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)64U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
-    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
+    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data1, data1_len / 64U);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
     *p
@@ -357,7 +322,7 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
+    uint32_t diff = 64U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_MD_state_32 s1 = *p;
@@ -365,13 +330,13 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -390,39 +355,33 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
-    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
+    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data11, data1_len / 64U);
     uint8_t *dst = buf;
     memcpy(dst, data21, data2_len * sizeof (uint8_t));
     *p
@@ -445,29 +404,29 @@ void Hacl_Streaming_SHA1_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *d
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[5U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)5U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 5U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_Hash_SHA1_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
+  Hacl_Hash_SHA1_legacy_update_multi(tmp_block_state, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Hacl_Hash_SHA1_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
   Hacl_Hash_Core_SHA1_legacy_finish(tmp_block_state, dst);
@@ -489,10 +448,10 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_st
   uint32_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
-  memcpy(block_state, block_state0, (uint32_t)5U * sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
+  memcpy(block_state, block_state0, 5U * sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_32
diff --git a/src/Hacl_Hash_SHA2.c b/src/Hacl_Hash_SHA2.c
index c93c3616..934ae3e2 100644
--- a/src/Hacl_Hash_SHA2.c
+++ b/src/Hacl_Hash_SHA2.c
@@ -30,9 +30,9 @@
 void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = hash;
     uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
     os[i] = x;);
@@ -42,49 +42,49 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
 {
   uint32_t hash_old[8U] = { 0U };
   uint32_t ws[16U] = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(hash_old, hash, 8U * sizeof (uint32_t));
   uint8_t *b10 = b;
   uint32_t u = load32_be(b10);
   ws[0U] = u;
-  uint32_t u0 = load32_be(b10 + (uint32_t)4U);
+  uint32_t u0 = load32_be(b10 + 4U);
   ws[1U] = u0;
-  uint32_t u1 = load32_be(b10 + (uint32_t)8U);
+  uint32_t u1 = load32_be(b10 + 8U);
   ws[2U] = u1;
-  uint32_t u2 = load32_be(b10 + (uint32_t)12U);
+  uint32_t u2 = load32_be(b10 + 12U);
   ws[3U] = u2;
-  uint32_t u3 = load32_be(b10 + (uint32_t)16U);
+  uint32_t u3 = load32_be(b10 + 16U);
   ws[4U] = u3;
-  uint32_t u4 = load32_be(b10 + (uint32_t)20U);
+  uint32_t u4 = load32_be(b10 + 20U);
   ws[5U] = u4;
-  uint32_t u5 = load32_be(b10 + (uint32_t)24U);
+  uint32_t u5 = load32_be(b10 + 24U);
   ws[6U] = u5;
-  uint32_t u6 = load32_be(b10 + (uint32_t)28U);
+  uint32_t u6 = load32_be(b10 + 28U);
   ws[7U] = u6;
-  uint32_t u7 = load32_be(b10 + (uint32_t)32U);
+  uint32_t u7 = load32_be(b10 + 32U);
   ws[8U] = u7;
-  uint32_t u8 = load32_be(b10 + (uint32_t)36U);
+  uint32_t u8 = load32_be(b10 + 36U);
   ws[9U] = u8;
-  uint32_t u9 = load32_be(b10 + (uint32_t)40U);
+  uint32_t u9 = load32_be(b10 + 40U);
   ws[10U] = u9;
-  uint32_t u10 = load32_be(b10 + (uint32_t)44U);
+  uint32_t u10 = load32_be(b10 + 44U);
   ws[11U] = u10;
-  uint32_t u11 = load32_be(b10 + (uint32_t)48U);
+  uint32_t u11 = load32_be(b10 + 48U);
   ws[12U] = u11;
-  uint32_t u12 = load32_be(b10 + (uint32_t)52U);
+  uint32_t u12 = load32_be(b10 + 52U);
   ws[13U] = u12;
-  uint32_t u13 = load32_be(b10 + (uint32_t)56U);
+  uint32_t u13 = load32_be(b10 + 56U);
   ws[14U] = u13;
-  uint32_t u14 = load32_be(b10 + (uint32_t)60U);
+  uint32_t u14 = load32_be(b10 + 60U);
   ws[15U] = u14;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[16U * i0 + i];
       uint32_t ws_t = ws[i];
       uint32_t a0 = hash[0U];
       uint32_t b0 = hash[1U];
@@ -98,20 +98,13 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
       uint32_t
       t1 =
         h02
-        +
-          ((e0 << (uint32_t)26U | e0 >> (uint32_t)6U)
-          ^
-            ((e0 << (uint32_t)21U | e0 >> (uint32_t)11U)
-            ^ (e0 << (uint32_t)7U | e0 >> (uint32_t)25U)))
+        + ((e0 << 26U | e0 >> 6U) ^ ((e0 << 21U | e0 >> 11U) ^ (e0 << 7U | e0 >> 25U)))
         + ((e0 & f0) ^ (~e0 & g0))
         + k_e_t
         + ws_t;
       uint32_t
       t2 =
-        ((a0 << (uint32_t)30U | a0 >> (uint32_t)2U)
-        ^
-          ((a0 << (uint32_t)19U | a0 >> (uint32_t)13U)
-          ^ (a0 << (uint32_t)10U | a0 >> (uint32_t)22U)))
+        ((a0 << 30U | a0 >> 2U) ^ ((a0 << 19U | a0 >> 13U) ^ (a0 << 10U | a0 >> 22U)))
         + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
       uint32_t a1 = t1 + t2;
       uint32_t b1 = a0;
@@ -129,30 +122,24 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         uint32_t t16 = ws[i];
-        uint32_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        uint32_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        uint32_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
-        uint32_t
-        s1 =
-          (t2 << (uint32_t)15U | t2 >> (uint32_t)17U)
-          ^ ((t2 << (uint32_t)13U | t2 >> (uint32_t)19U) ^ t2 >> (uint32_t)10U);
-        uint32_t
-        s0 =
-          (t15 << (uint32_t)25U | t15 >> (uint32_t)7U)
-          ^ ((t15 << (uint32_t)14U | t15 >> (uint32_t)18U) ^ t15 >> (uint32_t)3U);
+        uint32_t t15 = ws[(i + 1U) % 16U];
+        uint32_t t7 = ws[(i + 9U) % 16U];
+        uint32_t t2 = ws[(i + 14U) % 16U];
+        uint32_t s1 = (t2 << 15U | t2 >> 17U) ^ ((t2 << 13U | t2 >> 19U) ^ t2 >> 10U);
+        uint32_t s0 = (t15 << 25U | t15 >> 7U) ^ ((t15 << 14U | t15 >> 18U) ^ t15 >> 3U);
         ws[i] = s1 + t7 + s0 + t16;);
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = hash;
     uint32_t x = hash[i] + hash_old[i];
     os[i] = x;);
@@ -160,11 +147,11 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
 
 void Hacl_SHA2_Scalar32_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b0 = b;
-    uint8_t *mb = b0 + i * (uint32_t)64U;
+    uint8_t *mb = b0 + i * 64U;
     sha256_update(mb, st);
   }
 }
@@ -178,25 +165,25 @@ Hacl_SHA2_Scalar32_sha256_update_last(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[128U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b0 = b;
   memcpy(last, b0, len * sizeof (uint8_t));
-  last[len] = (uint8_t)0x80U;
-  memcpy(last + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last[len] = 0x80U;
+  memcpy(last + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)64U;
+  uint8_t *last10 = last + 64U;
   uint8_t *l0 = last00;
   uint8_t *l1 = last10;
   uint8_t *lb0 = l0;
@@ -204,7 +191,7 @@ Hacl_SHA2_Scalar32_sha256_update_last(
   uint8_t *last0 = lb0;
   uint8_t *last1 = lb1;
   sha256_update(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha256_update(last1, hash);
     return;
@@ -214,20 +201,16 @@ Hacl_SHA2_Scalar32_sha256_update_last(
 void Hacl_SHA2_Scalar32_sha256_finish(uint32_t *st, uint8_t *h)
 {
   uint8_t hbuf[32U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_be(hbuf + i * (uint32_t)4U, st[i]););
-  memcpy(h, hbuf, (uint32_t)32U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(hbuf + i * 4U, st[i]););
+  memcpy(h, hbuf, 32U * sizeof (uint8_t));
 }
 
 void Hacl_SHA2_Scalar32_sha224_init(uint32_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = hash;
     uint32_t x = Hacl_Impl_SHA2_Generic_h224[i];
     os[i] = x;);
@@ -247,20 +230,16 @@ Hacl_SHA2_Scalar32_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b,
 void Hacl_SHA2_Scalar32_sha224_finish(uint32_t *st, uint8_t *h)
 {
   uint8_t hbuf[32U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_be(hbuf + i * (uint32_t)4U, st[i]););
-  memcpy(h, hbuf, (uint32_t)28U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(hbuf + i * 4U, st[i]););
+  memcpy(h, hbuf, 28U * sizeof (uint8_t));
 }
 
 void Hacl_SHA2_Scalar32_sha512_init(uint64_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = hash;
     uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
     os[i] = x;);
@@ -270,49 +249,49 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
 {
   uint64_t hash_old[8U] = { 0U };
   uint64_t ws[16U] = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(hash_old, hash, 8U * sizeof (uint64_t));
   uint8_t *b10 = b;
   uint64_t u = load64_be(b10);
   ws[0U] = u;
-  uint64_t u0 = load64_be(b10 + (uint32_t)8U);
+  uint64_t u0 = load64_be(b10 + 8U);
   ws[1U] = u0;
-  uint64_t u1 = load64_be(b10 + (uint32_t)16U);
+  uint64_t u1 = load64_be(b10 + 16U);
   ws[2U] = u1;
-  uint64_t u2 = load64_be(b10 + (uint32_t)24U);
+  uint64_t u2 = load64_be(b10 + 24U);
   ws[3U] = u2;
-  uint64_t u3 = load64_be(b10 + (uint32_t)32U);
+  uint64_t u3 = load64_be(b10 + 32U);
   ws[4U] = u3;
-  uint64_t u4 = load64_be(b10 + (uint32_t)40U);
+  uint64_t u4 = load64_be(b10 + 40U);
   ws[5U] = u4;
-  uint64_t u5 = load64_be(b10 + (uint32_t)48U);
+  uint64_t u5 = load64_be(b10 + 48U);
   ws[6U] = u5;
-  uint64_t u6 = load64_be(b10 + (uint32_t)56U);
+  uint64_t u6 = load64_be(b10 + 56U);
   ws[7U] = u6;
-  uint64_t u7 = load64_be(b10 + (uint32_t)64U);
+  uint64_t u7 = load64_be(b10 + 64U);
   ws[8U] = u7;
-  uint64_t u8 = load64_be(b10 + (uint32_t)72U);
+  uint64_t u8 = load64_be(b10 + 72U);
   ws[9U] = u8;
-  uint64_t u9 = load64_be(b10 + (uint32_t)80U);
+  uint64_t u9 = load64_be(b10 + 80U);
   ws[10U] = u9;
-  uint64_t u10 = load64_be(b10 + (uint32_t)88U);
+  uint64_t u10 = load64_be(b10 + 88U);
   ws[11U] = u10;
-  uint64_t u11 = load64_be(b10 + (uint32_t)96U);
+  uint64_t u11 = load64_be(b10 + 96U);
   ws[12U] = u11;
-  uint64_t u12 = load64_be(b10 + (uint32_t)104U);
+  uint64_t u12 = load64_be(b10 + 104U);
   ws[13U] = u12;
-  uint64_t u13 = load64_be(b10 + (uint32_t)112U);
+  uint64_t u13 = load64_be(b10 + 112U);
   ws[14U] = u13;
-  uint64_t u14 = load64_be(b10 + (uint32_t)120U);
+  uint64_t u14 = load64_be(b10 + 120U);
   ws[15U] = u14;
   KRML_MAYBE_FOR5(i0,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
+    0U,
+    5U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[16U * i0 + i];
       uint64_t ws_t = ws[i];
       uint64_t a0 = hash[0U];
       uint64_t b0 = hash[1U];
@@ -326,20 +305,13 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
       uint64_t
       t1 =
         h02
-        +
-          ((e0 << (uint32_t)50U | e0 >> (uint32_t)14U)
-          ^
-            ((e0 << (uint32_t)46U | e0 >> (uint32_t)18U)
-            ^ (e0 << (uint32_t)23U | e0 >> (uint32_t)41U)))
+        + ((e0 << 50U | e0 >> 14U) ^ ((e0 << 46U | e0 >> 18U) ^ (e0 << 23U | e0 >> 41U)))
         + ((e0 & f0) ^ (~e0 & g0))
         + k_e_t
         + ws_t;
       uint64_t
       t2 =
-        ((a0 << (uint32_t)36U | a0 >> (uint32_t)28U)
-        ^
-          ((a0 << (uint32_t)30U | a0 >> (uint32_t)34U)
-          ^ (a0 << (uint32_t)25U | a0 >> (uint32_t)39U)))
+        ((a0 << 36U | a0 >> 28U) ^ ((a0 << 30U | a0 >> 34U) ^ (a0 << 25U | a0 >> 39U)))
         + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
       uint64_t a1 = t1 + t2;
       uint64_t b1 = a0;
@@ -357,30 +329,24 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)4U)
+    if (i0 < 4U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         uint64_t t16 = ws[i];
-        uint64_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        uint64_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        uint64_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
-        uint64_t
-        s1 =
-          (t2 << (uint32_t)45U | t2 >> (uint32_t)19U)
-          ^ ((t2 << (uint32_t)3U | t2 >> (uint32_t)61U) ^ t2 >> (uint32_t)6U);
-        uint64_t
-        s0 =
-          (t15 << (uint32_t)63U | t15 >> (uint32_t)1U)
-          ^ ((t15 << (uint32_t)56U | t15 >> (uint32_t)8U) ^ t15 >> (uint32_t)7U);
+        uint64_t t15 = ws[(i + 1U) % 16U];
+        uint64_t t7 = ws[(i + 9U) % 16U];
+        uint64_t t2 = ws[(i + 14U) % 16U];
+        uint64_t s1 = (t2 << 45U | t2 >> 19U) ^ ((t2 << 3U | t2 >> 61U) ^ t2 >> 6U);
+        uint64_t s0 = (t15 << 63U | t15 >> 1U) ^ ((t15 << 56U | t15 >> 8U) ^ t15 >> 7U);
         ws[i] = s1 + t7 + s0 + t16;);
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = hash;
     uint64_t x = hash[i] + hash_old[i];
     os[i] = x;);
@@ -388,11 +354,11 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
 
 void Hacl_SHA2_Scalar32_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
 {
-  uint32_t blocks = len / (uint32_t)128U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 128U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b0 = b;
-    uint8_t *mb = b0 + i * (uint32_t)128U;
+    uint8_t *mb = b0 + i * 128U;
     sha512_update(mb, st);
   }
 }
@@ -406,25 +372,25 @@ Hacl_SHA2_Scalar32_sha512_update_last(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+  if (len + 16U + 1U <= 128U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)128U;
+  uint32_t fin = blocks * 128U;
   uint8_t last[256U] = { 0U };
   uint8_t totlen_buf[16U] = { 0U };
-  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
   store128_be(totlen_buf, total_len_bits);
   uint8_t *b0 = b;
   memcpy(last, b0, len * sizeof (uint8_t));
-  last[len] = (uint8_t)0x80U;
-  memcpy(last + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last[len] = 0x80U;
+  memcpy(last + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
+  uint8_t *last10 = last + 128U;
   uint8_t *l0 = last00;
   uint8_t *l1 = last10;
   uint8_t *lb0 = l0;
@@ -432,7 +398,7 @@ Hacl_SHA2_Scalar32_sha512_update_last(
   uint8_t *last0 = lb0;
   uint8_t *last1 = lb1;
   sha512_update(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha512_update(last1, hash);
     return;
@@ -442,20 +408,16 @@ Hacl_SHA2_Scalar32_sha512_update_last(
 void Hacl_SHA2_Scalar32_sha512_finish(uint64_t *st, uint8_t *h)
 {
   uint8_t hbuf[64U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store64_be(hbuf + i * (uint32_t)8U, st[i]););
-  memcpy(h, hbuf, (uint32_t)64U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store64_be(hbuf + i * 8U, st[i]););
+  memcpy(h, hbuf, 64U * sizeof (uint8_t));
 }
 
 void Hacl_SHA2_Scalar32_sha384_init(uint64_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = hash;
     uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
     os[i] = x;);
@@ -480,12 +442,8 @@ Hacl_SHA2_Scalar32_sha384_update_last(
 void Hacl_SHA2_Scalar32_sha384_finish(uint64_t *st, uint8_t *h)
 {
   uint8_t hbuf[64U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store64_be(hbuf + i * (uint32_t)8U, st[i]););
-  memcpy(h, hbuf, (uint32_t)48U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store64_be(hbuf + i * 8U, st[i]););
+  memcpy(h, hbuf, 48U * sizeof (uint8_t));
 }
 
 /**
@@ -494,10 +452,10 @@ calling `free_256`.
 */
 Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_256(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
@@ -517,10 +475,10 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state
   uint32_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
-  memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
+  memcpy(block_state, block_state0, 8U * sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_32
@@ -539,7 +497,7 @@ void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_MD_state_32 *s)
   uint32_t *block_state = scrut.block_state;
   Hacl_SHA2_Scalar32_sha256_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -548,33 +506,33 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
 {
   Hacl_Streaming_MD_state_32 s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+  if ((uint64_t)len > 2305843009213693951ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (len <= 64U - sz)
   {
     Hacl_Streaming_MD_state_32 s1 = *p;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -589,42 +547,40 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_MD_state_32 s1 = *p;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+      Hacl_SHA2_Scalar32_sha256_update_nblocks(64U, buf, block_state1);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)64U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
-    Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / (uint32_t)64U * (uint32_t)64U,
-      data1,
-      block_state1);
+    Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / 64U * 64U, data1, block_state1);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
     *p
@@ -639,7 +595,7 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
+    uint32_t diff = 64U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_MD_state_32 s1 = *p;
@@ -647,13 +603,13 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -672,41 +628,33 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+      Hacl_SHA2_Scalar32_sha256_update_nblocks(64U, buf, block_state1);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
-    Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / (uint32_t)64U * (uint32_t)64U,
-      data11,
-      block_state1);
+    Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / 64U * 64U, data11, block_state1);
     uint8_t *dst = buf;
     memcpy(dst, data21, data2_len * sizeof (uint8_t));
     *p
@@ -752,29 +700,29 @@ void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  Hacl_SHA2_Scalar32_sha256_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Hacl_SHA2_Scalar32_sha256_update_last(prev_len_last + (uint64_t)r,
     r,
@@ -807,10 +755,10 @@ void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *d
   uint8_t *rb = dst;
   uint32_t st[8U] = { 0U };
   Hacl_SHA2_Scalar32_sha256_init(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   Hacl_SHA2_Scalar32_sha256_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
   Hacl_SHA2_Scalar32_sha256_update_last(len_, rem, lb, st);
@@ -819,10 +767,10 @@ void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *d
 
 Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_224(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
@@ -837,7 +785,7 @@ void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_MD_state_32 *s)
   uint32_t *block_state = scrut.block_state;
   Hacl_SHA2_Scalar32_sha224_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -863,29 +811,29 @@ void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  sha224_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  sha224_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Hacl_SHA2_Scalar32_sha224_update_last(prev_len_last + (uint64_t)r,
     r,
@@ -908,10 +856,10 @@ void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *d
   uint8_t *rb = dst;
   uint32_t st[8U] = { 0U };
   Hacl_SHA2_Scalar32_sha224_init(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha224_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
   Hacl_SHA2_Scalar32_sha224_update_last(len_, rem, lb, st);
@@ -920,10 +868,10 @@ void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *d
 
 Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_512(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64
   *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
   p[0U] = s;
@@ -943,10 +891,10 @@ Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state
   uint64_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)128U * sizeof (uint8_t));
-  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
-  memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  memcpy(buf, buf0, 128U * sizeof (uint8_t));
+  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
+  memcpy(block_state, block_state0, 8U * sizeof (uint64_t));
   Hacl_Streaming_MD_state_64
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_64
@@ -962,7 +910,7 @@ void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_MD_state_64 *s)
   uint64_t *block_state = scrut.block_state;
   Hacl_SHA2_Scalar32_sha512_init(block_state);
   Hacl_Streaming_MD_state_64
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -971,33 +919,33 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
 {
   Hacl_Streaming_MD_state_64 s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)18446744073709551615U - total_len)
+  if ((uint64_t)len > 18446744073709551615ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)128U;
+    sz = 128U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    sz = (uint32_t)(total_len % (uint64_t)128U);
   }
-  if (len <= (uint32_t)128U - sz)
+  if (len <= 128U - sz)
   {
     Hacl_Streaming_MD_state_64 s1 = *p;
     uint64_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -1012,42 +960,40 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_MD_state_64 s1 = *p;
     uint64_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, buf, block_state1);
+      Hacl_SHA2_Scalar32_sha512_update_nblocks(128U, buf, block_state1);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)128U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
+    uint32_t n_blocks = (len - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / (uint32_t)128U * (uint32_t)128U,
-      data1,
-      block_state1);
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / 128U * 128U, data1, block_state1);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
     *p
@@ -1062,7 +1008,7 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
   }
   else
   {
-    uint32_t diff = (uint32_t)128U - sz;
+    uint32_t diff = 128U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_MD_state_64 s1 = *p;
@@ -1070,13 +1016,13 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)128U;
+      sz10 = 128U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -1095,41 +1041,33 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, buf, block_state1);
+      Hacl_SHA2_Scalar32_sha512_update_nblocks(128U, buf, block_state1);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)128U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
+    uint32_t n_blocks = (len - diff - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / (uint32_t)128U * (uint32_t)128U,
-      data11,
-      block_state1);
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / 128U * 128U, data11, block_state1);
     uint8_t *dst = buf;
     memcpy(dst, data21, data2_len * sizeof (uint8_t));
     *p
@@ -1175,29 +1113,29 @@ void Hacl_Streaming_SHA2_finish_512(Hacl_Streaming_MD_state_64 *p, uint8_t *dst)
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)128U;
+    r = 128U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    r = (uint32_t)(total_len % (uint64_t)128U);
   }
   uint8_t *buf_1 = buf_;
   uint64_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 128U == 0U && r > 0U)
   {
-    ite = (uint32_t)128U;
+    ite = 128U;
   }
   else
   {
-    ite = r % (uint32_t)128U;
+    ite = r % 128U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  Hacl_SHA2_Scalar32_sha512_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
       FStar_UInt128_uint64_to_uint128((uint64_t)r)),
@@ -1231,10 +1169,10 @@ void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *d
   uint8_t *rb = dst;
   uint64_t st[8U] = { 0U };
   Hacl_SHA2_Scalar32_sha512_init(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
   Hacl_SHA2_Scalar32_sha512_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
   Hacl_SHA2_Scalar32_sha512_update_last(len_, rem, lb, st);
@@ -1243,10 +1181,10 @@ void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *d
 
 Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_384(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64
   *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
   p[0U] = s;
@@ -1261,7 +1199,7 @@ void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_MD_state_64 *s)
   uint64_t *block_state = scrut.block_state;
   Hacl_SHA2_Scalar32_sha384_init(block_state);
   Hacl_Streaming_MD_state_64
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -1287,29 +1225,29 @@ void Hacl_Streaming_SHA2_finish_384(Hacl_Streaming_MD_state_64 *p, uint8_t *dst)
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)128U;
+    r = 128U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    r = (uint32_t)(total_len % (uint64_t)128U);
   }
   uint8_t *buf_1 = buf_;
   uint64_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 128U == 0U && r > 0U)
   {
-    ite = (uint32_t)128U;
+    ite = 128U;
   }
   else
   {
-    ite = r % (uint32_t)128U;
+    ite = r % 128U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  Hacl_SHA2_Scalar32_sha384_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
       FStar_UInt128_uint64_to_uint128((uint64_t)r)),
@@ -1333,10 +1271,10 @@ void Hacl_Streaming_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *d
   uint8_t *rb = dst;
   uint64_t st[8U] = { 0U };
   Hacl_SHA2_Scalar32_sha384_init(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
   Hacl_SHA2_Scalar32_sha384_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
   Hacl_SHA2_Scalar32_sha384_update_last(len_, rem, lb, st);
diff --git a/src/Hacl_Hash_SHA3.c b/src/Hacl_Hash_SHA3.c
index 19d13b1b..51608a91 100644
--- a/src/Hacl_Hash_SHA3.c
+++ b/src/Hacl_Hash_SHA3.c
@@ -31,27 +31,27 @@ static uint32_t block_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)144U;
+        return 144U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)104U;
+        return 104U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)72U;
+        return 72U;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        return (uint32_t)168U;
+        return 168U;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     default:
       {
@@ -67,19 +67,19 @@ static uint32_t hash_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     default:
       {
@@ -97,7 +97,7 @@ Hacl_Hash_SHA3_update_multi_sha3(
   uint32_t n_blocks
 )
 {
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
     uint8_t *block = blocks + i * block_len(a);
     Hacl_Impl_SHA3_absorb_inner(block_len(a), block, s);
@@ -115,11 +115,11 @@ Hacl_Hash_SHA3_update_last_sha3(
   uint8_t suffix;
   if (a == Spec_Hash_Definitions_Shake128 || a == Spec_Hash_Definitions_Shake256)
   {
-    suffix = (uint8_t)0x1fU;
+    suffix = 0x1fU;
   }
   else
   {
-    suffix = (uint8_t)0x06U;
+    suffix = 0x06U;
   }
   uint32_t len = block_len(a);
   if (input_len == len)
@@ -127,16 +127,16 @@ Hacl_Hash_SHA3_update_last_sha3(
     Hacl_Impl_SHA3_absorb_inner(len, input, s);
     uint8_t lastBlock_[200U] = { 0U };
     uint8_t *lastBlock = lastBlock_;
-    memcpy(lastBlock, input + input_len, (uint32_t)0U * sizeof (uint8_t));
+    memcpy(lastBlock, input + input_len, 0U * sizeof (uint8_t));
     lastBlock[0U] = suffix;
     Hacl_Impl_SHA3_loadState(len, lastBlock, s);
-    if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && (uint32_t)0U == len - (uint32_t)1U)
+    if (!(((uint32_t)suffix & 0x80U) == 0U) && 0U == len - 1U)
     {
       Hacl_Impl_SHA3_state_permute(s);
     }
     uint8_t nextBlock_[200U] = { 0U };
     uint8_t *nextBlock = nextBlock_;
-    nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U;
+    nextBlock[len - 1U] = 0x80U;
     Hacl_Impl_SHA3_loadState(len, nextBlock, s);
     Hacl_Impl_SHA3_state_permute(s);
     return;
@@ -146,13 +146,13 @@ Hacl_Hash_SHA3_update_last_sha3(
   memcpy(lastBlock, input, input_len * sizeof (uint8_t));
   lastBlock[input_len] = suffix;
   Hacl_Impl_SHA3_loadState(len, lastBlock, s);
-  if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && input_len == len - (uint32_t)1U)
+  if (!(((uint32_t)suffix & 0x80U) == 0U) && input_len == len - 1U)
   {
     Hacl_Impl_SHA3_state_permute(s);
   }
   uint8_t nextBlock_[200U] = { 0U };
   uint8_t *nextBlock = nextBlock_;
-  nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U;
+  nextBlock[len - 1U] = 0x80U;
   Hacl_Impl_SHA3_loadState(len, nextBlock, s);
   Hacl_Impl_SHA3_state_permute(s);
 }
@@ -174,15 +174,15 @@ Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_
 {
   KRML_CHECK_SIZE(sizeof (uint8_t), block_len(a));
   uint8_t *buf0 = (uint8_t *)KRML_HOST_CALLOC(block_len(a), sizeof (uint8_t));
-  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
   Hacl_Streaming_Keccak_hash_buf block_state = { .fst = a, .snd = buf };
   Hacl_Streaming_Keccak_state
-  s = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)0U };
   Hacl_Streaming_Keccak_state
   *p = (Hacl_Streaming_Keccak_state *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_Keccak_state));
   p[0U] = s;
   uint64_t *s1 = block_state.snd;
-  memset(s1, 0U, (uint32_t)25U * sizeof (uint64_t));
+  memset(s1, 0U, 25U * sizeof (uint64_t));
   return p;
 }
 
@@ -207,12 +207,12 @@ Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_copy(Hacl_Streaming_Keccak_st
   KRML_CHECK_SIZE(sizeof (uint8_t), block_len(i));
   uint8_t *buf1 = (uint8_t *)KRML_HOST_CALLOC(block_len(i), sizeof (uint8_t));
   memcpy(buf1, buf0, block_len(i) * sizeof (uint8_t));
-  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
   Hacl_Streaming_Keccak_hash_buf block_state = { .fst = i, .snd = buf };
   hash_buf2 scrut = { .fst = block_state0, .snd = block_state };
   uint64_t *s_dst = scrut.snd.snd;
   uint64_t *s_src = scrut.fst.snd;
-  memcpy(s_dst, s_src, (uint32_t)25U * sizeof (uint64_t));
+  memcpy(s_dst, s_src, 25U * sizeof (uint64_t));
   Hacl_Streaming_Keccak_state
   s = { .block_state = block_state, .buf = buf1, .total_len = total_len0 };
   Hacl_Streaming_Keccak_state
@@ -227,11 +227,11 @@ void Hacl_Streaming_Keccak_reset(Hacl_Streaming_Keccak_state *s)
   uint8_t *buf = scrut.buf;
   Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state;
   Spec_Hash_Definitions_hash_alg i = block_state.fst;
-  KRML_HOST_IGNORE(i);
+  KRML_MAYBE_UNUSED_VAR(i);
   uint64_t *s1 = block_state.snd;
-  memset(s1, 0U, (uint32_t)25U * sizeof (uint64_t));
+  memset(s1, 0U, 25U * sizeof (uint64_t));
   Hacl_Streaming_Keccak_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -242,12 +242,12 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
   Hacl_Streaming_Keccak_hash_buf block_state = s.block_state;
   uint64_t total_len = s.total_len;
   Spec_Hash_Definitions_hash_alg i = block_state.fst;
-  if ((uint64_t)len > (uint64_t)0xFFFFFFFFFFFFFFFFU - total_len)
+  if ((uint64_t)len > 0xFFFFFFFFFFFFFFFFULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)block_len(i) == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)block_len(i) == 0ULL && total_len > 0ULL)
   {
     sz = block_len(i);
   }
@@ -262,7 +262,7 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
     {
       sz1 = block_len(i);
     }
@@ -283,14 +283,14 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Keccak_state s1 = *p;
     Hacl_Streaming_Keccak_hash_buf block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
     {
       sz1 = block_len(i);
     }
@@ -298,14 +298,14 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     {
       sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
       uint64_t *s2 = block_state1.snd;
       Hacl_Hash_SHA3_update_multi_sha3(a1, s2, buf, block_len(i) / block_len(a1));
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)block_len(i) == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)block_len(i) == 0ULL && (uint64_t)len > 0ULL)
     {
       ite = block_len(i);
     }
@@ -343,7 +343,7 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)block_len(i) == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)block_len(i) == 0ULL && total_len10 > 0ULL)
     {
       sz10 = block_len(i);
     }
@@ -368,7 +368,7 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
     {
       sz1 = block_len(i);
     }
@@ -376,20 +376,14 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     {
       sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
       uint64_t *s2 = block_state1.snd;
       Hacl_Hash_SHA3_update_multi_sha3(a1, s2, buf, block_len(i) / block_len(a1));
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)block_len(i)
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)block_len(i) == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
       ite = block_len(i);
     }
@@ -433,7 +427,7 @@ finish_(
   uint8_t *buf_ = scrut0.buf;
   uint64_t total_len = scrut0.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)block_len(a) == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)block_len(a) == 0ULL && total_len > 0ULL)
   {
     r = block_len(a);
   }
@@ -447,9 +441,9 @@ finish_(
   hash_buf2 scrut = { .fst = block_state, .snd = tmp_block_state };
   uint64_t *s_dst = scrut.snd.snd;
   uint64_t *s_src = scrut.fst.snd;
-  memcpy(s_dst, s_src, (uint32_t)25U * sizeof (uint64_t));
+  memcpy(s_dst, s_src, 25U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % block_len(a) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(a) == 0U && r > 0U)
   {
     ite = block_len(a);
   }
@@ -461,7 +455,7 @@ finish_(
   uint8_t *buf_multi = buf_1;
   Spec_Hash_Definitions_hash_alg a1 = tmp_block_state.fst;
   uint64_t *s0 = tmp_block_state.snd;
-  Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, (uint32_t)0U / block_len(a1));
+  Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, 0U / block_len(a1));
   Spec_Hash_Definitions_hash_alg a10 = tmp_block_state.fst;
   uint64_t *s1 = tmp_block_state.snd;
   Hacl_Hash_SHA3_update_last_sha3(a10, s1, buf_last, r);
@@ -495,7 +489,7 @@ Hacl_Streaming_Keccak_squeeze(Hacl_Streaming_Keccak_state *s, uint8_t *dst, uint
   {
     return Hacl_Streaming_Types_InvalidAlgorithm;
   }
-  if (l == (uint32_t)0U)
+  if (l == 0U)
   {
     return Hacl_Streaming_Types_InvalidLength;
   }
@@ -529,13 +523,7 @@ Hacl_SHA3_shake128_hacl(
   uint8_t *output
 )
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1344U,
-    (uint32_t)256U,
-    inputByteLen,
-    input,
-    (uint8_t)0x1FU,
-    outputByteLen,
-    output);
+  Hacl_Impl_SHA3_keccak(1344U, 256U, inputByteLen, input, 0x1FU, outputByteLen, output);
 }
 
 void
@@ -546,169 +534,99 @@ Hacl_SHA3_shake256_hacl(
   uint8_t *output
 )
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1088U,
-    (uint32_t)512U,
-    inputByteLen,
-    input,
-    (uint8_t)0x1FU,
-    outputByteLen,
-    output);
+  Hacl_Impl_SHA3_keccak(1088U, 512U, inputByteLen, input, 0x1FU, outputByteLen, output);
 }
 
 void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1152U,
-    (uint32_t)448U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)28U,
-    output);
+  Hacl_Impl_SHA3_keccak(1152U, 448U, inputByteLen, input, 0x06U, 28U, output);
 }
 
 void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1088U,
-    (uint32_t)512U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)32U,
-    output);
+  Hacl_Impl_SHA3_keccak(1088U, 512U, inputByteLen, input, 0x06U, 32U, output);
 }
 
 void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)832U,
-    (uint32_t)768U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)48U,
-    output);
+  Hacl_Impl_SHA3_keccak(832U, 768U, inputByteLen, input, 0x06U, 48U, output);
 }
 
 void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)576U,
-    (uint32_t)1024U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)64U,
-    output);
+  Hacl_Impl_SHA3_keccak(576U, 1024U, inputByteLen, input, 0x06U, 64U, output);
 }
 
 static const
 uint32_t
 keccak_rotc[24U] =
   {
-    (uint32_t)1U, (uint32_t)3U, (uint32_t)6U, (uint32_t)10U, (uint32_t)15U, (uint32_t)21U,
-    (uint32_t)28U, (uint32_t)36U, (uint32_t)45U, (uint32_t)55U, (uint32_t)2U, (uint32_t)14U,
-    (uint32_t)27U, (uint32_t)41U, (uint32_t)56U, (uint32_t)8U, (uint32_t)25U, (uint32_t)43U,
-    (uint32_t)62U, (uint32_t)18U, (uint32_t)39U, (uint32_t)61U, (uint32_t)20U, (uint32_t)44U
+    1U, 3U, 6U, 10U, 15U, 21U, 28U, 36U, 45U, 55U, 2U, 14U, 27U, 41U, 56U, 8U, 25U, 43U, 62U, 18U,
+    39U, 61U, 20U, 44U
   };
 
 static const
 uint32_t
 keccak_piln[24U] =
   {
-    (uint32_t)10U, (uint32_t)7U, (uint32_t)11U, (uint32_t)17U, (uint32_t)18U, (uint32_t)3U,
-    (uint32_t)5U, (uint32_t)16U, (uint32_t)8U, (uint32_t)21U, (uint32_t)24U, (uint32_t)4U,
-    (uint32_t)15U, (uint32_t)23U, (uint32_t)19U, (uint32_t)13U, (uint32_t)12U, (uint32_t)2U,
-    (uint32_t)20U, (uint32_t)14U, (uint32_t)22U, (uint32_t)9U, (uint32_t)6U, (uint32_t)1U
+    10U, 7U, 11U, 17U, 18U, 3U, 5U, 16U, 8U, 21U, 24U, 4U, 15U, 23U, 19U, 13U, 12U, 2U, 20U, 14U,
+    22U, 9U, 6U, 1U
   };
 
 static const
 uint64_t
 keccak_rndc[24U] =
   {
-    (uint64_t)0x0000000000000001U, (uint64_t)0x0000000000008082U, (uint64_t)0x800000000000808aU,
-    (uint64_t)0x8000000080008000U, (uint64_t)0x000000000000808bU, (uint64_t)0x0000000080000001U,
-    (uint64_t)0x8000000080008081U, (uint64_t)0x8000000000008009U, (uint64_t)0x000000000000008aU,
-    (uint64_t)0x0000000000000088U, (uint64_t)0x0000000080008009U, (uint64_t)0x000000008000000aU,
-    (uint64_t)0x000000008000808bU, (uint64_t)0x800000000000008bU, (uint64_t)0x8000000000008089U,
-    (uint64_t)0x8000000000008003U, (uint64_t)0x8000000000008002U, (uint64_t)0x8000000000000080U,
-    (uint64_t)0x000000000000800aU, (uint64_t)0x800000008000000aU, (uint64_t)0x8000000080008081U,
-    (uint64_t)0x8000000000008080U, (uint64_t)0x0000000080000001U, (uint64_t)0x8000000080008008U
+    0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL,
+    0x000000000000808bULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL,
+    0x000000000000008aULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
+    0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL,
+    0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800aULL, 0x800000008000000aULL,
+    0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
   };
 
 void Hacl_Impl_SHA3_state_permute(uint64_t *s)
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)24U; i0++)
+  for (uint32_t i0 = 0U; i0 < 24U; i0++)
   {
     uint64_t _C[5U] = { 0U };
     KRML_MAYBE_FOR5(i,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      _C[i] =
-        s[i
-        + (uint32_t)0U]
-        ^
-          (s[i
-          + (uint32_t)5U]
-          ^ (s[i + (uint32_t)10U] ^ (s[i + (uint32_t)15U] ^ s[i + (uint32_t)20U]))););
+      0U,
+      5U,
+      1U,
+      _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U]))););
     KRML_MAYBE_FOR5(i1,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      uint64_t uu____0 = _C[(i1 + (uint32_t)1U) % (uint32_t)5U];
-      uint64_t
-      _D =
-        _C[(i1 + (uint32_t)4U)
-        % (uint32_t)5U]
-        ^ (uu____0 << (uint32_t)1U | uu____0 >> (uint32_t)63U);
-      KRML_MAYBE_FOR5(i,
-        (uint32_t)0U,
-        (uint32_t)5U,
-        (uint32_t)1U,
-        s[i1 + (uint32_t)5U * i] = s[i1 + (uint32_t)5U * i] ^ _D;););
+      0U,
+      5U,
+      1U,
+      uint64_t uu____0 = _C[(i1 + 1U) % 5U];
+      uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U);
+      KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;););
     uint64_t x = s[1U];
     uint64_t current = x;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)24U; i++)
+    for (uint32_t i = 0U; i < 24U; i++)
     {
       uint32_t _Y = keccak_piln[i];
       uint32_t r = keccak_rotc[i];
       uint64_t temp = s[_Y];
       uint64_t uu____1 = current;
-      s[_Y] = uu____1 << r | uu____1 >> ((uint32_t)64U - r);
+      s[_Y] = uu____1 << r | uu____1 >> (64U - r);
       current = temp;
     }
     KRML_MAYBE_FOR5(i,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      uint64_t
-      v0 =
-        s[(uint32_t)0U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)1U + (uint32_t)5U * i] & s[(uint32_t)2U + (uint32_t)5U * i]);
-      uint64_t
-      v1 =
-        s[(uint32_t)1U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)2U + (uint32_t)5U * i] & s[(uint32_t)3U + (uint32_t)5U * i]);
-      uint64_t
-      v2 =
-        s[(uint32_t)2U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)3U + (uint32_t)5U * i] & s[(uint32_t)4U + (uint32_t)5U * i]);
-      uint64_t
-      v3 =
-        s[(uint32_t)3U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)4U + (uint32_t)5U * i] & s[(uint32_t)0U + (uint32_t)5U * i]);
-      uint64_t
-      v4 =
-        s[(uint32_t)4U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)0U + (uint32_t)5U * i] & s[(uint32_t)1U + (uint32_t)5U * i]);
-      s[(uint32_t)0U + (uint32_t)5U * i] = v0;
-      s[(uint32_t)1U + (uint32_t)5U * i] = v1;
-      s[(uint32_t)2U + (uint32_t)5U * i] = v2;
-      s[(uint32_t)3U + (uint32_t)5U * i] = v3;
-      s[(uint32_t)4U + (uint32_t)5U * i] = v4;);
+      0U,
+      5U,
+      1U,
+      uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]);
+      uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]);
+      uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]);
+      uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]);
+      uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]);
+      s[0U + 5U * i] = v0;
+      s[1U + 5U * i] = v1;
+      s[2U + 5U * i] = v2;
+      s[3U + 5U * i] = v3;
+      s[4U + 5U * i] = v4;);
     uint64_t c = keccak_rndc[i0];
     s[0U] = s[0U] ^ c;
   }
@@ -718,9 +636,9 @@ void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
 {
   uint8_t block[200U] = { 0U };
   memcpy(block, input, rateInBytes * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+  for (uint32_t i = 0U; i < 25U; i++)
   {
-    uint64_t u = load64_le(block + i * (uint32_t)8U);
+    uint64_t u = load64_le(block + i * 8U);
     uint64_t x = u;
     s[i] = s[i] ^ x;
   }
@@ -729,10 +647,10 @@ void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
 static void storeState(uint32_t rateInBytes, uint64_t *s, uint8_t *res)
 {
   uint8_t block[200U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+  for (uint32_t i = 0U; i < 25U; i++)
   {
     uint64_t sj = s[i];
-    store64_le(block + i * (uint32_t)8U, sj);
+    store64_le(block + i * 8U, sj);
   }
   memcpy(res, block, rateInBytes * sizeof (uint8_t));
 }
@@ -754,7 +672,7 @@ absorb(
 {
   uint32_t n_blocks = inputByteLen / rateInBytes;
   uint32_t rem = inputByteLen % rateInBytes;
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
     uint8_t *block = input + i * rateInBytes;
     Hacl_Impl_SHA3_absorb_inner(rateInBytes, block, s);
@@ -765,13 +683,13 @@ absorb(
   memcpy(lastBlock, last, rem * sizeof (uint8_t));
   lastBlock[rem] = delimitedSuffix;
   Hacl_Impl_SHA3_loadState(rateInBytes, lastBlock, s);
-  if (!((delimitedSuffix & (uint8_t)0x80U) == (uint8_t)0U) && rem == rateInBytes - (uint32_t)1U)
+  if (!(((uint32_t)delimitedSuffix & 0x80U) == 0U) && rem == rateInBytes - 1U)
   {
     Hacl_Impl_SHA3_state_permute(s);
   }
   uint8_t nextBlock_[200U] = { 0U };
   uint8_t *nextBlock = nextBlock_;
-  nextBlock[rateInBytes - (uint32_t)1U] = (uint8_t)0x80U;
+  nextBlock[rateInBytes - 1U] = 0x80U;
   Hacl_Impl_SHA3_loadState(rateInBytes, nextBlock, s);
   Hacl_Impl_SHA3_state_permute(s);
 }
@@ -788,7 +706,7 @@ Hacl_Impl_SHA3_squeeze(
   uint32_t remOut = outputByteLen % rateInBytes;
   uint8_t *last = output + outputByteLen - remOut;
   uint8_t *blocks = output;
-  for (uint32_t i = (uint32_t)0U; i < outBlocks; i++)
+  for (uint32_t i = 0U; i < outBlocks; i++)
   {
     storeState(rateInBytes, s, blocks + i * rateInBytes);
     Hacl_Impl_SHA3_state_permute(s);
@@ -807,8 +725,8 @@ Hacl_Impl_SHA3_keccak(
   uint8_t *output
 )
 {
-  KRML_HOST_IGNORE(capacity);
-  uint32_t rateInBytes = rate / (uint32_t)8U;
+  KRML_MAYBE_UNUSED_VAR(capacity);
+  uint32_t rateInBytes = rate / 8U;
   uint64_t s[25U] = { 0U };
   absorb(s, rateInBytes, inputByteLen, input, delimitedSuffix);
   Hacl_Impl_SHA3_squeeze(s, rateInBytes, outputByteLen, output);
diff --git a/src/Hacl_K256_ECDSA.c b/src/Hacl_K256_ECDSA.c
index 2ffc1060..ff2d8823 100644
--- a/src/Hacl_K256_ECDSA.c
+++ b/src/Hacl_K256_ECDSA.c
@@ -35,27 +35,27 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res)
 {
   uint64_t *a0 = a;
   uint64_t *res0 = res;
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < bLen / (uint32_t)4U; i++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i = 0U; i < bLen / 4U; i++)
   {
-    uint64_t t1 = a0[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res0 + (uint32_t)4U * i;
+    uint64_t t1 = a0[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res0 + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a0[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res0 + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a0[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res0 + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a0[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res0 + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a0[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res0 + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a0[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res0 + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a0[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res0 + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
-  for (uint32_t i = bLen / (uint32_t)4U * (uint32_t)4U; i < bLen; i++)
+  for (uint32_t i = bLen / 4U * 4U; i < bLen; i++)
   {
     uint64_t t1 = a0[i];
     uint64_t t2 = b[i];
@@ -68,26 +68,26 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res)
     uint64_t *a1 = a + bLen;
     uint64_t *res1 = res + bLen;
     uint64_t c = c00;
-    for (uint32_t i = (uint32_t)0U; i < (aLen - bLen) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (aLen - bLen) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, 0ULL, res_i);
     }
-    for (uint32_t i = (aLen - bLen) / (uint32_t)4U * (uint32_t)4U; i < aLen - bLen; i++)
+    for (uint32_t i = (aLen - bLen) / 4U * 4U; i < aLen - bLen; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c1 = c;
     return c1;
@@ -97,23 +97,23 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res)
 
 static uint64_t add4(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   return c;
@@ -121,52 +121,52 @@ static uint64_t add4(uint64_t *a, uint64_t *b, uint64_t *res)
 
 static void add_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -174,53 +174,53 @@ static void add_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 
 static void sub_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x;);
@@ -228,59 +228,59 @@ static void sub_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 
 static void mul4(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = b[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = a[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = a[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
     uint64_t r = c;
-    res[(uint32_t)4U + i0] = r;);
+    res[4U + i0] = r;);
 }
 
 static void sqr4(uint64_t *a, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = a;
     uint64_t a_j = a[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -288,30 +288,30 @@ static void sqr4(uint64_t *a, uint64_t *res)
     }
     uint64_t r = c;
     res[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline uint64_t is_qelem_zero(uint64_t *f)
 {
   uint64_t bn_zero[4U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(f[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
@@ -325,33 +325,33 @@ static inline bool is_qelem_zero_vartime(uint64_t *f)
   uint64_t f1 = f[1U];
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
-  return f0 == (uint64_t)0U && f1 == (uint64_t)0U && f2 == (uint64_t)0U && f3 == (uint64_t)0U;
+  return f0 == 0ULL && f1 == 0ULL && f2 == 0ULL && f3 == 0ULL;
 }
 
 static inline uint64_t load_qelem_check(uint64_t *f, uint8_t *b)
 {
   uint64_t n[4U] = { 0U };
-  n[0U] = (uint64_t)0xbfd25e8cd0364141U;
-  n[1U] = (uint64_t)0xbaaedce6af48a03bU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0xffffffffffffffffU;
+  n[0U] = 0xbfd25e8cd0364141ULL;
+  n[1U] = 0xbaaedce6af48a03bULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0xffffffffffffffffULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = f;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
   uint64_t is_zero = is_qelem_zero(f);
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(f[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(f[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t is_lt_q = acc;
   return ~is_zero & is_lt_q;
 }
@@ -359,11 +359,11 @@ static inline uint64_t load_qelem_check(uint64_t *f, uint8_t *b)
 static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b)
 {
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = f;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
   bool is_zero = is_qelem_zero_vartime(f);
@@ -372,29 +372,29 @@ static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b)
   uint64_t a2 = f[2U];
   uint64_t a3 = f[3U];
   bool is_lt_q_b;
-  if (a3 < (uint64_t)0xffffffffffffffffU)
+  if (a3 < 0xffffffffffffffffULL)
   {
     is_lt_q_b = true;
   }
-  else if (a2 < (uint64_t)0xfffffffffffffffeU)
+  else if (a2 < 0xfffffffffffffffeULL)
   {
     is_lt_q_b = true;
   }
-  else if (a2 > (uint64_t)0xfffffffffffffffeU)
+  else if (a2 > 0xfffffffffffffffeULL)
   {
     is_lt_q_b = false;
   }
-  else if (a1 < (uint64_t)0xbaaedce6af48a03bU)
+  else if (a1 < 0xbaaedce6af48a03bULL)
   {
     is_lt_q_b = true;
   }
-  else if (a1 > (uint64_t)0xbaaedce6af48a03bU)
+  else if (a1 > 0xbaaedce6af48a03bULL)
   {
     is_lt_q_b = false;
   }
   else
   {
-    is_lt_q_b = a0 < (uint64_t)0xbfd25e8cd0364141U;
+    is_lt_q_b = a0 < 0xbfd25e8cd0364141ULL;
   }
   return !is_zero && is_lt_q_b;
 }
@@ -402,16 +402,16 @@ static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b)
 static inline void modq_short(uint64_t *out, uint64_t *a)
 {
   uint64_t tmp[4U] = { 0U };
-  tmp[0U] = (uint64_t)0x402da1732fc9bebfU;
-  tmp[1U] = (uint64_t)0x4551231950b75fc4U;
-  tmp[2U] = (uint64_t)0x1U;
-  tmp[3U] = (uint64_t)0x0U;
+  tmp[0U] = 0x402da1732fc9bebfULL;
+  tmp[1U] = 0x4551231950b75fc4ULL;
+  tmp[2U] = 0x1ULL;
+  tmp[3U] = 0x0ULL;
   uint64_t c = add4(a, tmp, out);
-  uint64_t mask = (uint64_t)0U - c;
+  uint64_t mask = 0ULL - c;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = out;
     uint64_t x = (mask & out[i]) | (~mask & a[i]);
     os[i] = x;);
@@ -421,35 +421,31 @@ static inline void load_qelem_modq(uint64_t *f, uint8_t *b)
 {
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = f;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
-  memcpy(tmp, f, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, f, 4U * sizeof (uint64_t));
   modq_short(f, tmp);
 }
 
 static inline void store_qelem(uint8_t *b, uint64_t *f)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(b + i * (uint32_t)8U, f[(uint32_t)4U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(b + i * 8U, f[4U - i - 1U]););
 }
 
 static inline void qadd(uint64_t *out, uint64_t *f1, uint64_t *f2)
 {
   uint64_t n[4U] = { 0U };
-  n[0U] = (uint64_t)0xbfd25e8cd0364141U;
-  n[1U] = (uint64_t)0xbaaedce6af48a03bU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0xffffffffffffffffU;
+  n[0U] = 0xbfd25e8cd0364141ULL;
+  n[1U] = 0xbaaedce6af48a03bULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0xffffffffffffffffULL;
   add_mod4(n, f1, f2, out);
 }
 
@@ -463,33 +459,33 @@ mul_pow2_256_minus_q_add(
   uint64_t *res
 )
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + (uint32_t)2U);
-  uint64_t tmp[len + (uint32_t)2U];
-  memset(tmp, 0U, (len + (uint32_t)2U) * sizeof (uint64_t));
-  memset(tmp, 0U, (len + (uint32_t)2U) * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), len + 2U);
+  uint64_t tmp[len + 2U];
+  memset(tmp, 0U, (len + 2U) * sizeof (uint64_t));
+  memset(tmp, 0U, (len + 2U) * sizeof (uint64_t));
   KRML_MAYBE_FOR2(i0,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint64_t bj = t01[i0];
     uint64_t *res_j = tmp + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = a[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = a[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = a[i];
       uint64_t *res_i = res_j + i;
@@ -497,9 +493,9 @@ mul_pow2_256_minus_q_add(
     }
     uint64_t r = c;
     tmp[len + i0] = r;);
-  memcpy(res + (uint32_t)2U, a, len * sizeof (uint64_t));
-  KRML_HOST_IGNORE(bn_add(resLen, res, len + (uint32_t)2U, tmp, res));
-  uint64_t c = bn_add(resLen, res, (uint32_t)4U, e, res);
+  memcpy(res + 2U, a, len * sizeof (uint64_t));
+  bn_add(resLen, res, len + 2U, tmp, res);
+  uint64_t c = bn_add(resLen, res, 4U, e, res);
   return c;
 }
 
@@ -507,34 +503,23 @@ static inline void modq(uint64_t *out, uint64_t *a)
 {
   uint64_t r[4U] = { 0U };
   uint64_t tmp[4U] = { 0U };
-  tmp[0U] = (uint64_t)0x402da1732fc9bebfU;
-  tmp[1U] = (uint64_t)0x4551231950b75fc4U;
-  tmp[2U] = (uint64_t)0x1U;
-  tmp[3U] = (uint64_t)0x0U;
+  tmp[0U] = 0x402da1732fc9bebfULL;
+  tmp[1U] = 0x4551231950b75fc4ULL;
+  tmp[2U] = 0x1ULL;
+  tmp[3U] = 0x0ULL;
   uint64_t *t01 = tmp;
   uint64_t m[7U] = { 0U };
   uint64_t p[5U] = { 0U };
-  KRML_HOST_IGNORE(mul_pow2_256_minus_q_add((uint32_t)4U,
-      (uint32_t)7U,
-      t01,
-      a + (uint32_t)4U,
-      a,
-      m));
-  KRML_HOST_IGNORE(mul_pow2_256_minus_q_add((uint32_t)3U,
-      (uint32_t)5U,
-      t01,
-      m + (uint32_t)4U,
-      m,
-      p));
-  uint64_t
-  c2 = mul_pow2_256_minus_q_add((uint32_t)1U, (uint32_t)4U, t01, p + (uint32_t)4U, p, r);
+  mul_pow2_256_minus_q_add(4U, 7U, t01, a + 4U, a, m);
+  mul_pow2_256_minus_q_add(3U, 5U, t01, m + 4U, m, p);
+  uint64_t c2 = mul_pow2_256_minus_q_add(1U, 4U, t01, p + 4U, p, r);
   uint64_t c0 = c2;
   uint64_t c1 = add4(r, tmp, out);
-  uint64_t mask = (uint64_t)0U - (c0 + c1);
+  uint64_t mask = 0ULL - (c0 + c1);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = out;
     uint64_t x = (mask & out[i]) | (~mask & r[i]);
     os[i] = x;);
@@ -557,10 +542,10 @@ static inline void qsqr(uint64_t *out, uint64_t *f)
 static inline void qnegate_conditional_vartime(uint64_t *f, bool is_negate)
 {
   uint64_t n[4U] = { 0U };
-  n[0U] = (uint64_t)0xbfd25e8cd0364141U;
-  n[1U] = (uint64_t)0xbaaedce6af48a03bU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0xffffffffffffffffU;
+  n[0U] = 0xbfd25e8cd0364141ULL;
+  n[1U] = 0xbaaedce6af48a03bULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0xffffffffffffffffULL;
   uint64_t zero[4U] = { 0U };
   if (is_negate)
   {
@@ -574,31 +559,31 @@ static inline bool is_qelem_le_q_halved_vartime(uint64_t *f)
   uint64_t a1 = f[1U];
   uint64_t a2 = f[2U];
   uint64_t a3 = f[3U];
-  if (a3 < (uint64_t)0x7fffffffffffffffU)
+  if (a3 < 0x7fffffffffffffffULL)
   {
     return true;
   }
-  if (a3 > (uint64_t)0x7fffffffffffffffU)
+  if (a3 > 0x7fffffffffffffffULL)
   {
     return false;
   }
-  if (a2 < (uint64_t)0xffffffffffffffffU)
+  if (a2 < 0xffffffffffffffffULL)
   {
     return true;
   }
-  if (a2 > (uint64_t)0xffffffffffffffffU)
+  if (a2 > 0xffffffffffffffffULL)
   {
     return false;
   }
-  if (a1 < (uint64_t)0x5d576e7357a4501dU)
+  if (a1 < 0x5d576e7357a4501dULL)
   {
     return true;
   }
-  if (a1 > (uint64_t)0x5d576e7357a4501dU)
+  if (a1 > 0x5d576e7357a4501dULL)
   {
     return false;
   }
-  return a0 <= (uint64_t)0xdfe92f46681b20a0U;
+  return a0 <= 0xdfe92f46681b20a0ULL;
 }
 
 static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b)
@@ -606,27 +591,26 @@ static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b)
   uint64_t l[8U] = { 0U };
   mul4(a, b, l);
   uint64_t res_b_padded[4U] = { 0U };
-  memcpy(res_b_padded, l + (uint32_t)6U, (uint32_t)2U * sizeof (uint64_t));
-  uint64_t
-  c0 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, res_b_padded[0U], (uint64_t)1U, res);
-  uint64_t *a1 = res_b_padded + (uint32_t)1U;
-  uint64_t *res1 = res + (uint32_t)1U;
+  memcpy(res_b_padded, l + 6U, 2U * sizeof (uint64_t));
+  uint64_t c0 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, res_b_padded[0U], 1ULL, res);
+  uint64_t *a1 = res_b_padded + 1U;
+  uint64_t *res1 = res + 1U;
   uint64_t c = c0;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
+    0U,
+    3U,
+    1U,
     uint64_t t1 = a1[i];
     uint64_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i););
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t flag = l[5U] >> (uint32_t)63U;
-  uint64_t mask = (uint64_t)0U - flag;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t flag = l[5U] >> 63U;
+  uint64_t mask = 0ULL - flag;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (mask & res[i]) | (~mask & res_b_padded[i]);
     os[i] = x;);
@@ -634,7 +618,7 @@ static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b)
 
 static inline void qsquare_times_in_place(uint64_t *out, uint32_t b)
 {
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  for (uint32_t i = 0U; i < b; i++)
   {
     qsqr(out, out);
   }
@@ -642,8 +626,8 @@ static inline void qsquare_times_in_place(uint64_t *out, uint32_t b)
 
 static inline void qsquare_times(uint64_t *out, uint64_t *a, uint32_t b)
 {
-  memcpy(out, a, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  memcpy(out, a, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < b; i++)
   {
     qsqr(out, out);
   }
@@ -658,7 +642,7 @@ static inline void qinv(uint64_t *out, uint64_t *f)
   uint64_t x_1001[4U] = { 0U };
   uint64_t x_1011[4U] = { 0U };
   uint64_t x_1101[4U] = { 0U };
-  qsquare_times(x_10, f, (uint32_t)1U);
+  qsquare_times(x_10, f, 1U);
   qmul(x_11, x_10, f);
   qmul(x_101, x_10, x_11);
   qmul(x_111, x_10, x_101);
@@ -668,89 +652,89 @@ static inline void qinv(uint64_t *out, uint64_t *f)
   uint64_t x6[4U] = { 0U };
   uint64_t x8[4U] = { 0U };
   uint64_t x14[4U] = { 0U };
-  qsquare_times(x6, x_1101, (uint32_t)2U);
+  qsquare_times(x6, x_1101, 2U);
   qmul(x6, x6, x_1011);
-  qsquare_times(x8, x6, (uint32_t)2U);
+  qsquare_times(x8, x6, 2U);
   qmul(x8, x8, x_11);
-  qsquare_times(x14, x8, (uint32_t)6U);
+  qsquare_times(x14, x8, 6U);
   qmul(x14, x14, x6);
   uint64_t x56[4U] = { 0U };
-  qsquare_times(out, x14, (uint32_t)14U);
+  qsquare_times(out, x14, 14U);
   qmul(out, out, x14);
-  qsquare_times(x56, out, (uint32_t)28U);
+  qsquare_times(x56, out, 28U);
   qmul(x56, x56, out);
-  qsquare_times(out, x56, (uint32_t)56U);
+  qsquare_times(out, x56, 56U);
   qmul(out, out, x56);
-  qsquare_times_in_place(out, (uint32_t)14U);
+  qsquare_times_in_place(out, 14U);
   qmul(out, out, x14);
-  qsquare_times_in_place(out, (uint32_t)3U);
+  qsquare_times_in_place(out, 3U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_1011);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_1011);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)3U);
+  qsquare_times_in_place(out, 3U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_1001);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)10U);
+  qsquare_times_in_place(out, 10U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)9U);
+  qsquare_times_in_place(out, 9U);
   qmul(out, out, x8);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_1001);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_1011);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_11);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)10U);
+  qsquare_times_in_place(out, 10U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_1001);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, f);
-  qsquare_times_in_place(out, (uint32_t)8U);
+  qsquare_times_in_place(out, 8U);
   qmul(out, out, x6);
 }
 
 void Hacl_Impl_K256_Point_make_point_at_inf(uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
-  memset(px, 0U, (uint32_t)5U * sizeof (uint64_t));
-  memset(py, 0U, (uint32_t)5U * sizeof (uint64_t));
-  py[0U] = (uint64_t)1U;
-  memset(pz, 0U, (uint32_t)5U * sizeof (uint64_t));
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
+  memset(px, 0U, 5U * sizeof (uint64_t));
+  memset(py, 0U, 5U * sizeof (uint64_t));
+  py[0U] = 1ULL;
+  memset(pz, 0U, 5U * sizeof (uint64_t));
 }
 
 static inline void to_aff_point(uint64_t *p_aff, uint64_t *p)
 {
   uint64_t *x = p_aff;
-  uint64_t *y = p_aff + (uint32_t)5U;
+  uint64_t *y = p_aff + 5U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y1 = p + 5U;
+  uint64_t *z1 = p + 10U;
   uint64_t zinv[5U] = { 0U };
   Hacl_Impl_K256_Finv_finv(zinv, z1);
   Hacl_K256_Field_fmul(x, x1, zinv);
@@ -762,7 +746,7 @@ static inline void to_aff_point(uint64_t *p_aff, uint64_t *p)
 static inline void to_aff_point_x(uint64_t *x, uint64_t *p)
 {
   uint64_t *x1 = p;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *z1 = p + 10U;
   uint64_t zinv[5U] = { 0U };
   Hacl_Impl_K256_Finv_finv(zinv, z1);
   Hacl_K256_Field_fmul(x, x1, zinv);
@@ -773,13 +757,13 @@ static inline bool is_on_curve_vartime(uint64_t *p)
 {
   uint64_t y2_exp[5U] = { 0U };
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)5U;
+  uint64_t *y = p + 5U;
   uint64_t b[5U] = { 0U };
-  b[0U] = (uint64_t)0x7U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 0x7ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
   Hacl_K256_Field_fsqr(y2_exp, x);
   Hacl_K256_Field_fmul(y2_exp, y2_exp, x);
   Hacl_K256_Field_fadd(y2_exp, y2_exp, b);
@@ -795,11 +779,11 @@ static inline bool is_on_curve_vartime(uint64_t *p)
 void Hacl_Impl_K256_Point_point_negate(uint64_t *out, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
   uint64_t *ox = out;
-  uint64_t *oy = out + (uint32_t)5U;
-  uint64_t *oz = out + (uint32_t)10U;
+  uint64_t *oy = out + 5U;
+  uint64_t *oz = out + 10U;
   ox[0U] = px[0U];
   ox[1U] = px[1U];
   ox[2U] = px[2U];
@@ -815,11 +799,11 @@ void Hacl_Impl_K256_Point_point_negate(uint64_t *out, uint64_t *p)
   uint64_t a2 = py[2U];
   uint64_t a3 = py[3U];
   uint64_t a4 = py[4U];
-  uint64_t r0 = (uint64_t)18014381329608892U - a0;
-  uint64_t r1 = (uint64_t)18014398509481980U - a1;
-  uint64_t r2 = (uint64_t)18014398509481980U - a2;
-  uint64_t r3 = (uint64_t)18014398509481980U - a3;
-  uint64_t r4 = (uint64_t)1125899906842620U - a4;
+  uint64_t r0 = 18014381329608892ULL - a0;
+  uint64_t r1 = 18014398509481980ULL - a1;
+  uint64_t r2 = 18014398509481980ULL - a2;
+  uint64_t r3 = 18014398509481980ULL - a3;
+  uint64_t r4 = 1125899906842620ULL - a4;
   uint64_t f0 = r0;
   uint64_t f1 = r1;
   uint64_t f2 = r2;
@@ -845,9 +829,9 @@ static inline void point_negate_conditional_vartime(uint64_t *p, bool is_negate)
 static inline void aff_point_store(uint8_t *out, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
+  uint64_t *py = p + 5U;
   Hacl_K256_Field_store_felem(out, px);
-  Hacl_K256_Field_store_felem(out + (uint32_t)32U, py);
+  Hacl_K256_Field_store_felem(out + 32U, py);
 }
 
 void Hacl_Impl_K256_Point_point_store(uint8_t *out, uint64_t *p)
@@ -860,9 +844,9 @@ void Hacl_Impl_K256_Point_point_store(uint8_t *out, uint64_t *p)
 bool Hacl_Impl_K256_Point_aff_point_load_vartime(uint64_t *p, uint8_t *b)
 {
   uint8_t *px = b;
-  uint8_t *py = b + (uint32_t)32U;
+  uint8_t *py = b + 32U;
   uint64_t *bn_px = p;
-  uint64_t *bn_py = p + (uint32_t)5U;
+  uint64_t *bn_py = p + 5U;
   bool is_x_valid = Hacl_K256_Field_load_felem_lt_prime_vartime(bn_px, px);
   bool is_y_valid = Hacl_K256_Field_load_felem_lt_prime_vartime(bn_py, py);
   if (is_x_valid && is_y_valid)
@@ -879,14 +863,14 @@ static inline bool load_point_vartime(uint64_t *p, uint8_t *b)
   if (res)
   {
     uint64_t *x = p_aff;
-    uint64_t *y = p_aff + (uint32_t)5U;
+    uint64_t *y = p_aff + 5U;
     uint64_t *x1 = p;
-    uint64_t *y1 = p + (uint32_t)5U;
-    uint64_t *z1 = p + (uint32_t)10U;
-    memcpy(x1, x, (uint32_t)5U * sizeof (uint64_t));
-    memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t));
-    memset(z1, 0U, (uint32_t)5U * sizeof (uint64_t));
-    z1[0U] = (uint64_t)1U;
+    uint64_t *y1 = p + 5U;
+    uint64_t *z1 = p + 10U;
+    memcpy(x1, x, 5U * sizeof (uint64_t));
+    memcpy(y1, y, 5U * sizeof (uint64_t));
+    memset(z1, 0U, 5U * sizeof (uint64_t));
+    z1[0U] = 1ULL;
   }
   return res;
 }
@@ -895,24 +879,24 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
 {
   uint8_t s0 = s[0U];
   uint8_t s01 = s0;
-  if (!(s01 == (uint8_t)0x02U || s01 == (uint8_t)0x03U))
+  if (!(s01 == 0x02U || s01 == 0x03U))
   {
     return false;
   }
-  uint8_t *xb = s + (uint32_t)1U;
+  uint8_t *xb = s + 1U;
   bool is_x_valid = Hacl_K256_Field_load_felem_lt_prime_vartime(x, xb);
-  bool is_y_odd = s01 == (uint8_t)0x03U;
+  bool is_y_odd = s01 == 0x03U;
   if (!is_x_valid)
   {
     return false;
   }
   uint64_t y2[5U] = { 0U };
   uint64_t b[5U] = { 0U };
-  b[0U] = (uint64_t)0x7U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 0x7ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
   Hacl_K256_Field_fsqr(y2, x);
   Hacl_K256_Field_fmul(y2, y2, x);
   Hacl_K256_Field_fadd(y2, y2, b);
@@ -930,7 +914,7 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
     return false;
   }
   uint64_t x0 = y[0U];
-  bool is_y_odd1 = (x0 & (uint64_t)1U) == (uint64_t)1U;
+  bool is_y_odd1 = (x0 & 1ULL) == 1ULL;
   Hacl_K256_Field_fnegate_conditional_vartime(y, is_y_odd1 != is_y_odd);
   return true;
 }
@@ -939,33 +923,33 @@ void Hacl_Impl_K256_PointDouble_point_double(uint64_t *out, uint64_t *p)
 {
   uint64_t tmp[25U] = { 0U };
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y1 = p + 5U;
+  uint64_t *z1 = p + 10U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
   uint64_t *yy = tmp;
-  uint64_t *zz = tmp + (uint32_t)5U;
-  uint64_t *bzz3 = tmp + (uint32_t)10U;
-  uint64_t *bzz9 = tmp + (uint32_t)15U;
-  uint64_t *tmp1 = tmp + (uint32_t)20U;
+  uint64_t *zz = tmp + 5U;
+  uint64_t *bzz3 = tmp + 10U;
+  uint64_t *bzz9 = tmp + 15U;
+  uint64_t *tmp1 = tmp + 20U;
   Hacl_K256_Field_fsqr(yy, y1);
   Hacl_K256_Field_fsqr(zz, z1);
-  Hacl_K256_Field_fmul_small_num(x3, x1, (uint64_t)2U);
+  Hacl_K256_Field_fmul_small_num(x3, x1, 2ULL);
   Hacl_K256_Field_fmul(x3, x3, y1);
   Hacl_K256_Field_fmul(tmp1, yy, y1);
   Hacl_K256_Field_fmul(z3, tmp1, z1);
-  Hacl_K256_Field_fmul_small_num(z3, z3, (uint64_t)8U);
+  Hacl_K256_Field_fmul_small_num(z3, z3, 8ULL);
   Hacl_K256_Field_fnormalize_weak(z3, z3);
-  Hacl_K256_Field_fmul_small_num(bzz3, zz, (uint64_t)21U);
+  Hacl_K256_Field_fmul_small_num(bzz3, zz, 21ULL);
   Hacl_K256_Field_fnormalize_weak(bzz3, bzz3);
-  Hacl_K256_Field_fmul_small_num(bzz9, bzz3, (uint64_t)3U);
-  Hacl_K256_Field_fsub(bzz9, yy, bzz9, (uint64_t)6U);
+  Hacl_K256_Field_fmul_small_num(bzz9, bzz3, 3ULL);
+  Hacl_K256_Field_fsub(bzz9, yy, bzz9, 6ULL);
   Hacl_K256_Field_fadd(tmp1, yy, bzz3);
   Hacl_K256_Field_fmul(tmp1, bzz9, tmp1);
   Hacl_K256_Field_fmul(y3, yy, zz);
   Hacl_K256_Field_fmul(x3, x3, bzz9);
-  Hacl_K256_Field_fmul_small_num(y3, y3, (uint64_t)168U);
+  Hacl_K256_Field_fmul_small_num(y3, y3, 168ULL);
   Hacl_K256_Field_fadd(y3, tmp1, y3);
   Hacl_K256_Field_fnormalize_weak(y3, y3);
 }
@@ -974,23 +958,23 @@ void Hacl_Impl_K256_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q)
 {
   uint64_t tmp[45U] = { 0U };
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y1 = p + 5U;
+  uint64_t *z1 = p + 10U;
   uint64_t *x2 = q;
-  uint64_t *y2 = q + (uint32_t)5U;
-  uint64_t *z2 = q + (uint32_t)10U;
+  uint64_t *y2 = q + 5U;
+  uint64_t *z2 = q + 10U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
   uint64_t *xx = tmp;
-  uint64_t *yy = tmp + (uint32_t)5U;
-  uint64_t *zz = tmp + (uint32_t)10U;
-  uint64_t *xy_pairs = tmp + (uint32_t)15U;
-  uint64_t *yz_pairs = tmp + (uint32_t)20U;
-  uint64_t *xz_pairs = tmp + (uint32_t)25U;
-  uint64_t *yy_m_bzz3 = tmp + (uint32_t)30U;
-  uint64_t *yy_p_bzz3 = tmp + (uint32_t)35U;
-  uint64_t *tmp1 = tmp + (uint32_t)40U;
+  uint64_t *yy = tmp + 5U;
+  uint64_t *zz = tmp + 10U;
+  uint64_t *xy_pairs = tmp + 15U;
+  uint64_t *yz_pairs = tmp + 20U;
+  uint64_t *xz_pairs = tmp + 25U;
+  uint64_t *yy_m_bzz3 = tmp + 30U;
+  uint64_t *yy_p_bzz3 = tmp + 35U;
+  uint64_t *tmp1 = tmp + 40U;
   Hacl_K256_Field_fmul(xx, x1, x2);
   Hacl_K256_Field_fmul(yy, y1, y2);
   Hacl_K256_Field_fmul(zz, z1, z2);
@@ -998,29 +982,29 @@ void Hacl_Impl_K256_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q)
   Hacl_K256_Field_fadd(tmp1, x2, y2);
   Hacl_K256_Field_fmul(xy_pairs, xy_pairs, tmp1);
   Hacl_K256_Field_fadd(tmp1, xx, yy);
-  Hacl_K256_Field_fsub(xy_pairs, xy_pairs, tmp1, (uint64_t)4U);
+  Hacl_K256_Field_fsub(xy_pairs, xy_pairs, tmp1, 4ULL);
   Hacl_K256_Field_fadd(yz_pairs, y1, z1);
   Hacl_K256_Field_fadd(tmp1, y2, z2);
   Hacl_K256_Field_fmul(yz_pairs, yz_pairs, tmp1);
   Hacl_K256_Field_fadd(tmp1, yy, zz);
-  Hacl_K256_Field_fsub(yz_pairs, yz_pairs, tmp1, (uint64_t)4U);
+  Hacl_K256_Field_fsub(yz_pairs, yz_pairs, tmp1, 4ULL);
   Hacl_K256_Field_fadd(xz_pairs, x1, z1);
   Hacl_K256_Field_fadd(tmp1, x2, z2);
   Hacl_K256_Field_fmul(xz_pairs, xz_pairs, tmp1);
   Hacl_K256_Field_fadd(tmp1, xx, zz);
-  Hacl_K256_Field_fsub(xz_pairs, xz_pairs, tmp1, (uint64_t)4U);
-  Hacl_K256_Field_fmul_small_num(tmp1, zz, (uint64_t)21U);
+  Hacl_K256_Field_fsub(xz_pairs, xz_pairs, tmp1, 4ULL);
+  Hacl_K256_Field_fmul_small_num(tmp1, zz, 21ULL);
   Hacl_K256_Field_fnormalize_weak(tmp1, tmp1);
-  Hacl_K256_Field_fsub(yy_m_bzz3, yy, tmp1, (uint64_t)2U);
+  Hacl_K256_Field_fsub(yy_m_bzz3, yy, tmp1, 2ULL);
   Hacl_K256_Field_fadd(yy_p_bzz3, yy, tmp1);
-  Hacl_K256_Field_fmul_small_num(x3, yz_pairs, (uint64_t)21U);
+  Hacl_K256_Field_fmul_small_num(x3, yz_pairs, 21ULL);
   Hacl_K256_Field_fnormalize_weak(x3, x3);
-  Hacl_K256_Field_fmul_small_num(z3, xx, (uint64_t)3U);
-  Hacl_K256_Field_fmul_small_num(y3, z3, (uint64_t)21U);
+  Hacl_K256_Field_fmul_small_num(z3, xx, 3ULL);
+  Hacl_K256_Field_fmul_small_num(y3, z3, 21ULL);
   Hacl_K256_Field_fnormalize_weak(y3, y3);
   Hacl_K256_Field_fmul(tmp1, xy_pairs, yy_m_bzz3);
   Hacl_K256_Field_fmul(x3, x3, xz_pairs);
-  Hacl_K256_Field_fsub(x3, tmp1, x3, (uint64_t)2U);
+  Hacl_K256_Field_fsub(x3, tmp1, x3, 2ULL);
   Hacl_K256_Field_fnormalize_weak(x3, x3);
   Hacl_K256_Field_fmul(tmp1, yy_p_bzz3, yy_m_bzz3);
   Hacl_K256_Field_fmul(y3, y3, xz_pairs);
@@ -1036,30 +1020,30 @@ static inline void scalar_split_lambda(uint64_t *r1, uint64_t *r2, uint64_t *k)
 {
   uint64_t tmp1[4U] = { 0U };
   uint64_t tmp2[4U] = { 0U };
-  tmp1[0U] = (uint64_t)0xe893209a45dbb031U;
-  tmp1[1U] = (uint64_t)0x3daa8a1471e8ca7fU;
-  tmp1[2U] = (uint64_t)0xe86c90e49284eb15U;
-  tmp1[3U] = (uint64_t)0x3086d221a7d46bcdU;
-  tmp2[0U] = (uint64_t)0x1571b4ae8ac47f71U;
-  tmp2[1U] = (uint64_t)0x221208ac9df506c6U;
-  tmp2[2U] = (uint64_t)0x6f547fa90abfe4c4U;
-  tmp2[3U] = (uint64_t)0xe4437ed6010e8828U;
+  tmp1[0U] = 0xe893209a45dbb031ULL;
+  tmp1[1U] = 0x3daa8a1471e8ca7fULL;
+  tmp1[2U] = 0xe86c90e49284eb15ULL;
+  tmp1[3U] = 0x3086d221a7d46bcdULL;
+  tmp2[0U] = 0x1571b4ae8ac47f71ULL;
+  tmp2[1U] = 0x221208ac9df506c6ULL;
+  tmp2[2U] = 0x6f547fa90abfe4c4ULL;
+  tmp2[3U] = 0xe4437ed6010e8828ULL;
   qmul_shift_384(r1, k, tmp1);
   qmul_shift_384(r2, k, tmp2);
-  tmp1[0U] = (uint64_t)0x6f547fa90abfe4c3U;
-  tmp1[1U] = (uint64_t)0xe4437ed6010e8828U;
-  tmp1[2U] = (uint64_t)0x0U;
-  tmp1[3U] = (uint64_t)0x0U;
-  tmp2[0U] = (uint64_t)0xd765cda83db1562cU;
-  tmp2[1U] = (uint64_t)0x8a280ac50774346dU;
-  tmp2[2U] = (uint64_t)0xfffffffffffffffeU;
-  tmp2[3U] = (uint64_t)0xffffffffffffffffU;
+  tmp1[0U] = 0x6f547fa90abfe4c3ULL;
+  tmp1[1U] = 0xe4437ed6010e8828ULL;
+  tmp1[2U] = 0x0ULL;
+  tmp1[3U] = 0x0ULL;
+  tmp2[0U] = 0xd765cda83db1562cULL;
+  tmp2[1U] = 0x8a280ac50774346dULL;
+  tmp2[2U] = 0xfffffffffffffffeULL;
+  tmp2[3U] = 0xffffffffffffffffULL;
   qmul(r1, r1, tmp1);
   qmul(r2, r2, tmp2);
-  tmp1[0U] = (uint64_t)0xe0cfc810b51283cfU;
-  tmp1[1U] = (uint64_t)0xa880b9fc8ec739c2U;
-  tmp1[2U] = (uint64_t)0x5ad9e3fd77ed9ba4U;
-  tmp1[3U] = (uint64_t)0xac9c52b33fa3cf1fU;
+  tmp1[0U] = 0xe0cfc810b51283cfULL;
+  tmp1[1U] = 0xa880b9fc8ec739c2ULL;
+  tmp1[2U] = 0x5ad9e3fd77ed9ba4ULL;
+  tmp1[3U] = 0xac9c52b33fa3cf1fULL;
   qadd(r2, r1, r2);
   qmul(tmp2, r2, tmp1);
   qadd(r1, k, tmp2);
@@ -1068,17 +1052,17 @@ static inline void scalar_split_lambda(uint64_t *r1, uint64_t *r2, uint64_t *k)
 static inline void point_mul_lambda(uint64_t *res, uint64_t *p)
 {
   uint64_t *rx = res;
-  uint64_t *ry = res + (uint32_t)5U;
-  uint64_t *rz = res + (uint32_t)10U;
+  uint64_t *ry = res + 5U;
+  uint64_t *rz = res + 10U;
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
   uint64_t beta[5U] = { 0U };
-  beta[0U] = (uint64_t)0x96c28719501eeU;
-  beta[1U] = (uint64_t)0x7512f58995c13U;
-  beta[2U] = (uint64_t)0xc3434e99cf049U;
-  beta[3U] = (uint64_t)0x7106e64479eaU;
-  beta[4U] = (uint64_t)0x7ae96a2b657cU;
+  beta[0U] = 0x96c28719501eeULL;
+  beta[1U] = 0x7512f58995c13ULL;
+  beta[2U] = 0xc3434e99cf049ULL;
+  beta[3U] = 0x7106e64479eaULL;
+  beta[4U] = 0x7ae96a2b657cULL;
   Hacl_K256_Field_fmul(rx, beta, px);
   ry[0U] = py[0U];
   ry[1U] = py[1U];
@@ -1096,11 +1080,11 @@ static inline void point_mul_lambda_inplace(uint64_t *res)
 {
   uint64_t *rx = res;
   uint64_t beta[5U] = { 0U };
-  beta[0U] = (uint64_t)0x96c28719501eeU;
-  beta[1U] = (uint64_t)0x7512f58995c13U;
-  beta[2U] = (uint64_t)0xc3434e99cf049U;
-  beta[3U] = (uint64_t)0x7106e64479eaU;
-  beta[4U] = (uint64_t)0x7ae96a2b657cU;
+  beta[0U] = 0x96c28719501eeULL;
+  beta[1U] = 0x7512f58995c13ULL;
+  beta[2U] = 0xc3434e99cf049ULL;
+  beta[3U] = 0x7106e64479eaULL;
+  beta[4U] = 0x7ae96a2b657cULL;
   Hacl_K256_Field_fmul(rx, beta, rx);
 }
 
@@ -1123,7 +1107,7 @@ ecmult_endo_split(
 {
   scalar_split_lambda(r1, r2, scalar);
   point_mul_lambda(q2, q);
-  memcpy(q1, q, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(q1, q, 15U * sizeof (uint64_t));
   bool b0 = is_qelem_le_q_halved_vartime(r1);
   qnegate_conditional_vartime(r1, !b0);
   point_negate_conditional_vartime(q1, !b0);
@@ -1140,45 +1124,37 @@ void Hacl_Impl_K256_PointMul_point_mul(uint64_t *out, uint64_t *scalar, uint64_t
   uint64_t table[240U] = { 0U };
   uint64_t tmp[15U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)15U;
+  uint64_t *t1 = table + 15U;
   Hacl_Impl_K256_Point_make_point_at_inf(t0);
-  memcpy(t1, q, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(t1, q, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 15U;
     Hacl_Impl_K256_PointDouble_point_double(tmp, t11);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U;
+    memcpy(table + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 15U;
     Hacl_Impl_K256_PointAdd_point_add(tmp, q, t2);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t)););
+    memcpy(table + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t)););
   Hacl_Impl_K256_Point_make_point_at_inf(out);
   uint64_t tmp0[15U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)15U * sizeof (uint64_t));
+    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 256U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 15U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)15U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 15U;
       KRML_MAYBE_FOR15(i,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
+        0U,
+        15U,
+        1U,
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
@@ -1188,17 +1164,17 @@ void Hacl_Impl_K256_PointMul_point_mul(uint64_t *out, uint64_t *scalar, uint64_t
 
 static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
 {
-  memcpy(tmp, (uint64_t *)table, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(tmp, (uint64_t *)table, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i0,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
-    const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    15U,
+    1U,
+    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + 1U));
+    const uint64_t *res_j = table + (i0 + 1U) * 15U;
     KRML_MAYBE_FOR15(i,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
+      0U,
+      15U,
+      1U,
       uint64_t *os = tmp;
       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
       os[i] = x;););
@@ -1208,79 +1184,72 @@ static inline void point_mul_g(uint64_t *out, uint64_t *scalar)
 {
   uint64_t q1[15U] = { 0U };
   uint64_t *gx = q1;
-  uint64_t *gy = q1 + (uint32_t)5U;
-  uint64_t *gz = q1 + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = q1 + 5U;
+  uint64_t *gz = q1 + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
   uint64_t
   q2[15U] =
     {
-      (uint64_t)4496295042185355U, (uint64_t)3125448202219451U, (uint64_t)1239608518490046U,
-      (uint64_t)2687445637493112U, (uint64_t)77979604880139U, (uint64_t)3360310474215011U,
-      (uint64_t)1216410458165163U, (uint64_t)177901593587973U, (uint64_t)3209978938104985U,
-      (uint64_t)118285133003718U, (uint64_t)434519962075150U, (uint64_t)1114612377498854U,
-      (uint64_t)3488596944003813U, (uint64_t)450716531072892U, (uint64_t)66044973203836U
+      4496295042185355ULL, 3125448202219451ULL, 1239608518490046ULL, 2687445637493112ULL,
+      77979604880139ULL, 3360310474215011ULL, 1216410458165163ULL, 177901593587973ULL,
+      3209978938104985ULL, 118285133003718ULL, 434519962075150ULL, 1114612377498854ULL,
+      3488596944003813ULL, 450716531072892ULL, 66044973203836ULL
     };
-  KRML_HOST_IGNORE(q2);
+  KRML_MAYBE_UNUSED_VAR(q2);
   uint64_t
   q3[15U] =
     {
-      (uint64_t)1277614565900951U, (uint64_t)378671684419493U, (uint64_t)3176260448102880U,
-      (uint64_t)1575691435565077U, (uint64_t)167304528382180U, (uint64_t)2600787765776588U,
-      (uint64_t)7497946149293U, (uint64_t)2184272641272202U, (uint64_t)2200235265236628U,
-      (uint64_t)265969268774814U, (uint64_t)1913228635640715U, (uint64_t)2831959046949342U,
-      (uint64_t)888030405442963U, (uint64_t)1817092932985033U, (uint64_t)101515844997121U
+      1277614565900951ULL, 378671684419493ULL, 3176260448102880ULL, 1575691435565077ULL,
+      167304528382180ULL, 2600787765776588ULL, 7497946149293ULL, 2184272641272202ULL,
+      2200235265236628ULL, 265969268774814ULL, 1913228635640715ULL, 2831959046949342ULL,
+      888030405442963ULL, 1817092932985033ULL, 101515844997121ULL
     };
-  KRML_HOST_IGNORE(q3);
+  KRML_MAYBE_UNUSED_VAR(q3);
   uint64_t
   q4[15U] =
     {
-      (uint64_t)34056422761564U, (uint64_t)3315864838337811U, (uint64_t)3797032336888745U,
-      (uint64_t)2580641850480806U, (uint64_t)208048944042500U, (uint64_t)1233795288689421U,
-      (uint64_t)1048795233382631U, (uint64_t)646545158071530U, (uint64_t)1816025742137285U,
-      (uint64_t)12245672982162U, (uint64_t)2119364213800870U, (uint64_t)2034960311715107U,
-      (uint64_t)3172697815804487U, (uint64_t)4185144850224160U, (uint64_t)2792055915674U
+      34056422761564ULL, 3315864838337811ULL, 3797032336888745ULL, 2580641850480806ULL,
+      208048944042500ULL, 1233795288689421ULL, 1048795233382631ULL, 646545158071530ULL,
+      1816025742137285ULL, 12245672982162ULL, 2119364213800870ULL, 2034960311715107ULL,
+      3172697815804487ULL, 4185144850224160ULL, 2792055915674ULL
     };
-  KRML_HOST_IGNORE(q4);
+  KRML_MAYBE_UNUSED_VAR(q4);
   uint64_t *r1 = scalar;
-  uint64_t *r2 = scalar + (uint32_t)1U;
-  uint64_t *r3 = scalar + (uint32_t)2U;
-  uint64_t *r4 = scalar + (uint32_t)3U;
+  uint64_t *r2 = scalar + 1U;
+  uint64_t *r3 = scalar + 2U;
+  uint64_t *r4 = scalar + 3U;
   Hacl_Impl_K256_Point_make_point_at_inf(out);
   uint64_t tmp[15U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
+    0U,
+    16U,
+    1U,
+    KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 64U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);
-    uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
+    uint32_t k0 = 64U - 4U * i - 4U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);
-    uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
+    uint32_t k1 = 64U - 4U * i - 4U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);
-    uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
+    uint32_t k2 = 64U - 4U * i - 4U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp););
 }
@@ -1290,75 +1259,65 @@ point_mul_g_double_vartime(uint64_t *out, uint64_t *scalar1, uint64_t *scalar2,
 {
   uint64_t q1[15U] = { 0U };
   uint64_t *gx = q1;
-  uint64_t *gy = q1 + (uint32_t)5U;
-  uint64_t *gz = q1 + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = q1 + 5U;
+  uint64_t *gz = q1 + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
   uint64_t table2[480U] = { 0U };
   uint64_t tmp[15U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)15U;
+  uint64_t *t1 = table2 + 15U;
   Hacl_Impl_K256_Point_make_point_at_inf(t0);
-  memcpy(t1, q2, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(t1, q2, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 15U;
     Hacl_Impl_K256_PointDouble_point_double(tmp, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U;
+    memcpy(table2 + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 15U;
     Hacl_Impl_K256_PointAdd_point_add(tmp, q2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t)););
   uint64_t tmp0[15U] = { 0U };
-  uint32_t i0 = (uint32_t)255U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, i0, (uint32_t)5U);
+  uint32_t i0 = 255U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
-  const
-  uint64_t
-  *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)15U;
-  memcpy(out, (uint64_t *)a_bits_l, (uint32_t)15U * sizeof (uint64_t));
-  uint32_t i1 = (uint32_t)255U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, i1, (uint32_t)5U);
+  const uint64_t *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 15U;
+  memcpy(out, (uint64_t *)a_bits_l, 15U * sizeof (uint64_t));
+  uint32_t i1 = 255U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
-  const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)15U;
-  memcpy(tmp0, (uint64_t *)a_bits_l0, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l0 = table2 + bits_l320 * 15U;
+  memcpy(tmp0, (uint64_t *)a_bits_l0, 15U * sizeof (uint64_t));
   Hacl_Impl_K256_PointAdd_point_add(out, out, tmp0);
   uint64_t tmp1[15U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++)
+  for (uint32_t i = 0U; i < 51U; i++)
   {
-    KRML_MAYBE_FOR5(i2,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 255U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, k, 5U);
     uint32_t bits_l321 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)15U;
-    memcpy(tmp1, (uint64_t *)a_bits_l1, (uint32_t)15U * sizeof (uint64_t));
+    const uint64_t *a_bits_l1 = table2 + bits_l321 * 15U;
+    memcpy(tmp1, (uint64_t *)a_bits_l1, 15U * sizeof (uint64_t));
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1);
-    uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, k0, (uint32_t)5U);
+    uint32_t k0 = 255U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, k0, 5U);
     uint32_t bits_l322 = (uint32_t)bits_l0;
     const
     uint64_t
-    *a_bits_l2 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)15U;
-    memcpy(tmp1, (uint64_t *)a_bits_l2, (uint32_t)15U * sizeof (uint64_t));
+    *a_bits_l2 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 15U;
+    memcpy(tmp1, (uint64_t *)a_bits_l2, 15U * sizeof (uint64_t));
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1);
   }
 }
@@ -1380,99 +1339,89 @@ point_mul_g_double_split_lambda_table(
   uint64_t table2[480U] = { 0U };
   uint64_t tmp[15U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)15U;
+  uint64_t *t1 = table2 + 15U;
   Hacl_Impl_K256_Point_make_point_at_inf(t0);
-  memcpy(t1, p2, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(t1, p2, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 15U;
     Hacl_Impl_K256_PointDouble_point_double(tmp, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U;
+    memcpy(table2 + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 15U;
     Hacl_Impl_K256_PointAdd_point_add(tmp, p2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t)););
   uint64_t tmp0[15U] = { 0U };
   uint64_t tmp1[15U] = { 0U };
-  uint32_t i0 = (uint32_t)125U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r1, i0, (uint32_t)5U);
+  uint32_t i0 = 125U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
-  const
-  uint64_t
-  *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)15U;
-  memcpy(out, (uint64_t *)a_bits_l, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 15U;
+  memcpy(out, (uint64_t *)a_bits_l, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(out, is_negate1);
-  uint32_t i1 = (uint32_t)125U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r2, i1, (uint32_t)5U);
+  uint32_t i1 = 125U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
   const
   uint64_t
-  *a_bits_l0 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l320 * (uint32_t)15U;
-  memcpy(tmp1, (uint64_t *)a_bits_l0, (uint32_t)15U * sizeof (uint64_t));
+  *a_bits_l0 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l320 * 15U;
+  memcpy(tmp1, (uint64_t *)a_bits_l0, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(tmp1, is_negate2);
   point_mul_lambda_inplace(tmp1);
   Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1);
   uint64_t tmp10[15U] = { 0U };
-  uint32_t i2 = (uint32_t)125U;
-  uint64_t bits_c1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r3, i2, (uint32_t)5U);
+  uint32_t i2 = 125U;
+  uint64_t bits_c1 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r3, i2, 5U);
   uint32_t bits_l321 = (uint32_t)bits_c1;
-  const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)15U;
-  memcpy(tmp0, (uint64_t *)a_bits_l1, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l1 = table2 + bits_l321 * 15U;
+  memcpy(tmp0, (uint64_t *)a_bits_l1, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(tmp0, is_negate3);
-  uint32_t i3 = (uint32_t)125U;
-  uint64_t bits_c2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r4, i3, (uint32_t)5U);
+  uint32_t i3 = 125U;
+  uint64_t bits_c2 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r4, i3, 5U);
   uint32_t bits_l322 = (uint32_t)bits_c2;
-  const uint64_t *a_bits_l2 = table2 + bits_l322 * (uint32_t)15U;
-  memcpy(tmp10, (uint64_t *)a_bits_l2, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l2 = table2 + bits_l322 * 15U;
+  memcpy(tmp10, (uint64_t *)a_bits_l2, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(tmp10, is_negate4);
   point_mul_lambda_inplace(tmp10);
   Hacl_Impl_K256_PointAdd_point_add(tmp0, tmp0, tmp10);
   Hacl_Impl_K256_PointAdd_point_add(out, out, tmp0);
   uint64_t tmp2[15U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+  for (uint32_t i = 0U; i < 25U; i++)
   {
-    KRML_MAYBE_FOR5(i4,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r4, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i4, 0U, 5U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 125U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r4, k, 5U);
     uint32_t bits_l323 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l3 = table2 + bits_l323 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l3, (uint32_t)15U * sizeof (uint64_t));
+    const uint64_t *a_bits_l3 = table2 + bits_l323 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l3, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate4);
     point_mul_lambda_inplace(tmp2);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
-    uint32_t k0 = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r3, k0, (uint32_t)5U);
+    uint32_t k0 = 125U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r3, k0, 5U);
     uint32_t bits_l324 = (uint32_t)bits_l0;
-    const uint64_t *a_bits_l4 = table2 + bits_l324 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l4, (uint32_t)15U * sizeof (uint64_t));
+    const uint64_t *a_bits_l4 = table2 + bits_l324 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l4, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate3);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
-    uint32_t k1 = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r2, k1, (uint32_t)5U);
+    uint32_t k1 = 125U - 5U * i - 5U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r2, k1, 5U);
     uint32_t bits_l325 = (uint32_t)bits_l1;
     const
     uint64_t
-    *a_bits_l5 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l325 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l5, (uint32_t)15U * sizeof (uint64_t));
+    *a_bits_l5 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l325 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l5, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate2);
     point_mul_lambda_inplace(tmp2);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
-    uint32_t k2 = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r1, k2, (uint32_t)5U);
+    uint32_t k2 = 125U - 5U * i - 5U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r1, k2, 5U);
     uint32_t bits_l326 = (uint32_t)bits_l2;
     const
     uint64_t
-    *a_bits_l6 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l326 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l6, (uint32_t)15U * sizeof (uint64_t));
+    *a_bits_l6 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l326 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l6, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate1);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
   }
@@ -1483,16 +1432,16 @@ check_ecmult_endo_split(uint64_t *r1, uint64_t *r2, uint64_t *r3, uint64_t *r4)
 {
   uint64_t f20 = r1[2U];
   uint64_t f30 = r1[3U];
-  bool b1 = f20 == (uint64_t)0U && f30 == (uint64_t)0U;
+  bool b1 = f20 == 0ULL && f30 == 0ULL;
   uint64_t f21 = r2[2U];
   uint64_t f31 = r2[3U];
-  bool b2 = f21 == (uint64_t)0U && f31 == (uint64_t)0U;
+  bool b2 = f21 == 0ULL && f31 == 0ULL;
   uint64_t f22 = r3[2U];
   uint64_t f32 = r3[3U];
-  bool b3 = f22 == (uint64_t)0U && f32 == (uint64_t)0U;
+  bool b3 = f22 == 0ULL && f32 == 0ULL;
   uint64_t f2 = r4[2U];
   uint64_t f3 = r4[3U];
-  bool b4 = f2 == (uint64_t)0U && f3 == (uint64_t)0U;
+  bool b4 = f2 == 0ULL && f3 == 0ULL;
   return b1 && b2 && b3 && b4;
 }
 
@@ -1515,30 +1464,30 @@ point_mul_g_double_split_lambda_vartime(
 {
   uint64_t g[15U] = { 0U };
   uint64_t *gx = g;
-  uint64_t *gy = g + (uint32_t)5U;
-  uint64_t *gz = g + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = g + 5U;
+  uint64_t *gz = g + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
   uint64_t r1234[16U] = { 0U };
   uint64_t q1234[60U] = { 0U };
   uint64_t *r1 = r1234;
-  uint64_t *r2 = r1234 + (uint32_t)4U;
-  uint64_t *r3 = r1234 + (uint32_t)8U;
-  uint64_t *r4 = r1234 + (uint32_t)12U;
+  uint64_t *r2 = r1234 + 4U;
+  uint64_t *r3 = r1234 + 8U;
+  uint64_t *r4 = r1234 + 12U;
   uint64_t *q1 = q1234;
-  uint64_t *q2 = q1234 + (uint32_t)15U;
-  uint64_t *q3 = q1234 + (uint32_t)30U;
-  uint64_t *q4 = q1234 + (uint32_t)45U;
+  uint64_t *q2 = q1234 + 15U;
+  uint64_t *q3 = q1234 + 30U;
+  uint64_t *q4 = q1234 + 45U;
   __bool_bool scrut0 = ecmult_endo_split(r1, r2, q1, q2, scalar1, g);
   bool is_high10 = scrut0.fst;
   bool is_high20 = scrut0.snd;
@@ -1615,30 +1564,30 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(
   uint8_t *nonce
 )
 {
-  uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
-  KRML_HOST_IGNORE(oneq);
+  uint64_t oneq[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
+  KRML_MAYBE_UNUSED_VAR(oneq);
   uint64_t rsdk_q[16U] = { 0U };
   uint64_t *r_q = rsdk_q;
-  uint64_t *s_q = rsdk_q + (uint32_t)4U;
-  uint64_t *d_a = rsdk_q + (uint32_t)8U;
-  uint64_t *k_q = rsdk_q + (uint32_t)12U;
+  uint64_t *s_q = rsdk_q + 4U;
+  uint64_t *d_a = rsdk_q + 8U;
+  uint64_t *k_q = rsdk_q + 12U;
   uint64_t is_b_valid0 = load_qelem_check(d_a, private_key);
-  uint64_t oneq10[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq10[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = d_a;
     uint64_t uu____0 = oneq10[i];
     uint64_t x = uu____0 ^ (is_b_valid0 & (d_a[i] ^ uu____0));
     os[i] = x;);
   uint64_t is_sk_valid = is_b_valid0;
   uint64_t is_b_valid = load_qelem_check(k_q, nonce);
-  uint64_t oneq1[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq1[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = k_q;
     uint64_t uu____1 = oneq1[i];
     uint64_t x = uu____1 ^ (is_b_valid & (k_q[i] ^ uu____1));
@@ -1660,11 +1609,11 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(
   qadd(s_q, z, s_q);
   qmul(s_q, kinv, s_q);
   store_qelem(signature, r_q);
-  store_qelem(signature + (uint32_t)32U, s_q);
+  store_qelem(signature + 32U, s_q);
   uint64_t is_r_zero = is_qelem_zero(r_q);
   uint64_t is_s_zero = is_qelem_zero(s_q);
   uint64_t m = are_sk_nonce_valid & (~is_r_zero & ~is_s_zero);
-  bool res = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool res = m == 0xFFFFFFFFFFFFFFFFULL;
   return res;
 }
 
@@ -1713,14 +1662,14 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
 {
   uint64_t tmp[35U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *r_q = tmp + (uint32_t)15U;
-  uint64_t *s_q = tmp + (uint32_t)19U;
-  uint64_t *u1 = tmp + (uint32_t)23U;
-  uint64_t *u2 = tmp + (uint32_t)27U;
-  uint64_t *m_q = tmp + (uint32_t)31U;
+  uint64_t *r_q = tmp + 15U;
+  uint64_t *s_q = tmp + 19U;
+  uint64_t *u1 = tmp + 23U;
+  uint64_t *u2 = tmp + 27U;
+  uint64_t *m_q = tmp + 31U;
   bool is_pk_valid = load_point_vartime(pk, public_key);
   bool is_r_valid = load_qelem_vartime(r_q, signature);
-  bool is_s_valid = load_qelem_vartime(s_q, signature + (uint32_t)32U);
+  bool is_s_valid = load_qelem_vartime(s_q, signature + 32U);
   bool is_rs_valid = is_r_valid && is_s_valid;
   load_qelem_modq(m_q, m);
   if (!(is_pk_valid && is_rs_valid))
@@ -1734,7 +1683,7 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
   uint64_t res[15U] = { 0U };
   point_mul_g_double_split_lambda_vartime(res, u1, u2, pk);
   uint64_t tmp1[5U] = { 0U };
-  uint64_t *pz = res + (uint32_t)10U;
+  uint64_t *pz = res + 10U;
   Hacl_K256_Field_fnormalize(tmp1, pz);
   bool b = Hacl_K256_Field_is_felem_zero_vartime(tmp1);
   if (b)
@@ -1742,7 +1691,7 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
     return false;
   }
   uint64_t *x = res;
-  uint64_t *z = res + (uint32_t)10U;
+  uint64_t *z = res + 10U;
   uint8_t r_bytes[32U] = { 0U };
   uint64_t r_fe[5U] = { 0U };
   uint64_t tmp_q[5U] = { 0U };
@@ -1756,11 +1705,11 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
     bool is_r_lt_p_m_q = Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime(r_fe);
     if (is_r_lt_p_m_q)
     {
-      tmp_q[0U] = (uint64_t)0x25e8cd0364141U;
-      tmp_q[1U] = (uint64_t)0xe6af48a03bbfdU;
-      tmp_q[2U] = (uint64_t)0xffffffebaaedcU;
-      tmp_q[3U] = (uint64_t)0xfffffffffffffU;
-      tmp_q[4U] = (uint64_t)0xffffffffffffU;
+      tmp_q[0U] = 0x25e8cd0364141ULL;
+      tmp_q[1U] = 0xe6af48a03bbfdULL;
+      tmp_q[2U] = 0xffffffebaaedcULL;
+      tmp_q[3U] = 0xfffffffffffffULL;
+      tmp_q[4U] = 0xffffffffffffULL;
       Hacl_K256_Field_fadd(tmp_q, r_fe, tmp_q);
       return fmul_eq_vartime(tmp_q, z, tmp_x);
     }
@@ -1805,7 +1754,7 @@ Compute canonical lowest S value for `signature` (R || S).
 bool Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(uint8_t *signature)
 {
   uint64_t s_q[4U] = { 0U };
-  uint8_t *s = signature + (uint32_t)32U;
+  uint8_t *s = signature + 32U;
   bool is_sk_valid = load_qelem_vartime(s_q, s);
   if (!is_sk_valid)
   {
@@ -1813,7 +1762,7 @@ bool Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(uint8_t *signature)
   }
   bool is_sk_lt_q_halved = is_qelem_le_q_halved_vartime(s_q);
   qnegate_conditional_vartime(s_q, !is_sk_lt_q_halved);
-  store_qelem(signature + (uint32_t)32U, s_q);
+  store_qelem(signature + 32U, s_q);
   return true;
 }
 
@@ -1827,7 +1776,7 @@ Check whether `signature` (R || S) is in canonical form.
 bool Hacl_K256_ECDSA_secp256k1_ecdsa_is_signature_normalized(uint8_t *signature)
 {
   uint64_t s_q[4U] = { 0U };
-  uint8_t *s = signature + (uint32_t)32U;
+  uint8_t *s = signature + 32U;
   bool is_s_valid = load_qelem_vartime(s_q, s);
   bool is_s_lt_q_halved = is_qelem_le_q_halved_vartime(s_q);
   return is_s_valid && is_s_lt_q_halved;
@@ -1971,11 +1920,11 @@ Convert a public key from uncompressed to its raw form.
 bool Hacl_K256_ECDSA_public_key_uncompressed_to_raw(uint8_t *pk_raw, uint8_t *pk)
 {
   uint8_t pk0 = pk[0U];
-  if (pk0 != (uint8_t)0x04U)
+  if (pk0 != 0x04U)
   {
     return false;
   }
-  memcpy(pk_raw, pk + (uint32_t)1U, (uint32_t)64U * sizeof (uint8_t));
+  memcpy(pk_raw, pk + 1U, 64U * sizeof (uint8_t));
   return true;
 }
 
@@ -1989,8 +1938,8 @@ Convert a public key from raw to its uncompressed form.
 */
 void Hacl_K256_ECDSA_public_key_uncompressed_from_raw(uint8_t *pk, uint8_t *pk_raw)
 {
-  pk[0U] = (uint8_t)0x04U;
-  memcpy(pk + (uint32_t)1U, pk_raw, (uint32_t)64U * sizeof (uint8_t));
+  pk[0U] = 0x04U;
+  memcpy(pk + 1U, pk_raw, 64U * sizeof (uint8_t));
 }
 
 /**
@@ -2007,12 +1956,12 @@ bool Hacl_K256_ECDSA_public_key_compressed_to_raw(uint8_t *pk_raw, uint8_t *pk)
 {
   uint64_t xa[5U] = { 0U };
   uint64_t ya[5U] = { 0U };
-  uint8_t *pk_xb = pk + (uint32_t)1U;
+  uint8_t *pk_xb = pk + 1U;
   bool b = aff_point_decompress_vartime(xa, ya, pk);
   if (b)
   {
-    memcpy(pk_raw, pk_xb, (uint32_t)32U * sizeof (uint8_t));
-    Hacl_K256_Field_store_felem(pk_raw + (uint32_t)32U, ya);
+    memcpy(pk_raw, pk_xb, 32U * sizeof (uint8_t));
+    Hacl_K256_Field_store_felem(pk_raw + 32U, ya);
   }
   return b;
 }
@@ -2028,20 +1977,20 @@ Convert a public key from raw to its compressed form.
 void Hacl_K256_ECDSA_public_key_compressed_from_raw(uint8_t *pk, uint8_t *pk_raw)
 {
   uint8_t *pk_x = pk_raw;
-  uint8_t *pk_y = pk_raw + (uint32_t)32U;
+  uint8_t *pk_y = pk_raw + 32U;
   uint8_t x0 = pk_y[31U];
-  bool is_pk_y_odd = (x0 & (uint8_t)1U) == (uint8_t)1U;
+  bool is_pk_y_odd = ((uint32_t)x0 & 1U) == 1U;
   uint8_t ite;
   if (is_pk_y_odd)
   {
-    ite = (uint8_t)0x03U;
+    ite = 0x03U;
   }
   else
   {
-    ite = (uint8_t)0x02U;
+    ite = 0x02U;
   }
   pk[0U] = ite;
-  memcpy(pk + (uint32_t)1U, pk_x, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(pk + 1U, pk_x, 32U * sizeof (uint8_t));
 }
 
 
@@ -2084,7 +2033,7 @@ bool Hacl_K256_ECDSA_is_private_key_valid(uint8_t *private_key)
 {
   uint64_t s_q[4U] = { 0U };
   uint64_t res = load_qelem_check(s_q, private_key);
-  return res == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return res == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -2107,13 +2056,13 @@ bool Hacl_K256_ECDSA_secret_to_public(uint8_t *public_key, uint8_t *private_key)
 {
   uint64_t tmp[19U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *sk = tmp + (uint32_t)15U;
+  uint64_t *sk = tmp + 15U;
   uint64_t is_b_valid = load_qelem_check(sk, private_key);
-  uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -2121,7 +2070,7 @@ bool Hacl_K256_ECDSA_secret_to_public(uint8_t *public_key, uint8_t *private_key)
   uint64_t is_sk_valid = is_b_valid;
   point_mul_g(pk, sk);
   Hacl_Impl_K256_Point_point_store(public_key, pk);
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -2140,15 +2089,15 @@ bool Hacl_K256_ECDSA_ecdh(uint8_t *shared_secret, uint8_t *their_pubkey, uint8_t
 {
   uint64_t tmp[34U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *ss = tmp + (uint32_t)15U;
-  uint64_t *sk = tmp + (uint32_t)30U;
+  uint64_t *ss = tmp + 15U;
+  uint64_t *sk = tmp + 30U;
   bool is_pk_valid = load_point_vartime(pk, their_pubkey);
   uint64_t is_b_valid = load_qelem_check(sk, private_key);
-  uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -2159,6 +2108,6 @@ bool Hacl_K256_ECDSA_ecdh(uint8_t *shared_secret, uint8_t *their_pubkey, uint8_t
     Hacl_Impl_K256_PointMul_point_mul(ss, sk, pk);
     Hacl_Impl_K256_Point_point_store(shared_secret, ss);
   }
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU && is_pk_valid;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL && is_pk_valid;
 }
 
diff --git a/src/Hacl_NaCl.c b/src/Hacl_NaCl.c
index 37104040..8a64c531 100644
--- a/src/Hacl_NaCl.c
+++ b/src/Hacl_NaCl.c
@@ -30,9 +30,9 @@
 static void secretbox_init(uint8_t *xkeys, uint8_t *k, uint8_t *n)
 {
   uint8_t *subkey = xkeys;
-  uint8_t *aekey = xkeys + (uint32_t)32U;
+  uint8_t *aekey = xkeys + 32U;
   uint8_t *n0 = n;
-  uint8_t *n1 = n + (uint32_t)16U;
+  uint8_t *n1 = n + 16U;
   Hacl_Salsa20_hsalsa20(subkey, k, n0);
   Hacl_Salsa20_salsa20_key_block0(aekey, subkey, n1);
 }
@@ -42,34 +42,34 @@ secretbox_detached(uint32_t mlen, uint8_t *c, uint8_t *tag, uint8_t *k, uint8_t
 {
   uint8_t xkeys[96U] = { 0U };
   secretbox_init(xkeys, k, n);
-  uint8_t *mkey = xkeys + (uint32_t)32U;
-  uint8_t *n1 = n + (uint32_t)16U;
+  uint8_t *mkey = xkeys + 32U;
+  uint8_t *n1 = n + 16U;
   uint8_t *subkey = xkeys;
-  uint8_t *ekey0 = xkeys + (uint32_t)64U;
+  uint8_t *ekey0 = xkeys + 64U;
   uint32_t mlen0;
-  if (mlen <= (uint32_t)32U)
+  if (mlen <= 32U)
   {
     mlen0 = mlen;
   }
   else
   {
-    mlen0 = (uint32_t)32U;
+    mlen0 = 32U;
   }
   uint32_t mlen1 = mlen - mlen0;
   uint8_t *m0 = m;
   uint8_t *m1 = m + mlen0;
   uint8_t block0[32U] = { 0U };
   memcpy(block0, m0, mlen0 * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = block0;
-    uint8_t x = block0[i] ^ ekey0[i];
+    uint8_t x = (uint32_t)block0[i] ^ (uint32_t)ekey0[i];
     os[i] = x;
   }
   uint8_t *c0 = c;
   uint8_t *c1 = c + mlen0;
   memcpy(c0, block0, mlen0 * sizeof (uint8_t));
-  Hacl_Salsa20_salsa20_encrypt(mlen1, c1, m1, subkey, n1, (uint32_t)1U);
+  Hacl_Salsa20_salsa20_encrypt(mlen1, c1, m1, subkey, n1, 1U);
   Hacl_Poly1305_32_poly1305_mac(tag, mlen, c, mkey);
 }
 
@@ -85,55 +85,55 @@ secretbox_open_detached(
 {
   uint8_t xkeys[96U] = { 0U };
   secretbox_init(xkeys, k, n);
-  uint8_t *mkey = xkeys + (uint32_t)32U;
+  uint8_t *mkey = xkeys + 32U;
   uint8_t tag_[16U] = { 0U };
   Hacl_Poly1305_32_poly1305_mac(tag_, mlen, c, mkey);
-  uint8_t res = (uint8_t)255U;
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t uu____0 = FStar_UInt8_eq_mask(tag[i], tag_[i]);
-    res = uu____0 & res;);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
     uint8_t *subkey = xkeys;
-    uint8_t *ekey0 = xkeys + (uint32_t)64U;
-    uint8_t *n1 = n + (uint32_t)16U;
+    uint8_t *ekey0 = xkeys + 64U;
+    uint8_t *n1 = n + 16U;
     uint32_t mlen0;
-    if (mlen <= (uint32_t)32U)
+    if (mlen <= 32U)
     {
       mlen0 = mlen;
     }
     else
     {
-      mlen0 = (uint32_t)32U;
+      mlen0 = 32U;
     }
     uint32_t mlen1 = mlen - mlen0;
     uint8_t *c0 = c;
     uint8_t *c1 = c + mlen0;
     uint8_t block0[32U] = { 0U };
     memcpy(block0, c0, mlen0 * sizeof (uint8_t));
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t *os = block0;
-      uint8_t x = block0[i] ^ ekey0[i];
+      uint8_t x = (uint32_t)block0[i] ^ (uint32_t)ekey0[i];
       os[i] = x;
     }
     uint8_t *m0 = m;
     uint8_t *m1 = m + mlen0;
     memcpy(m0, block0, mlen0 * sizeof (uint8_t));
-    Hacl_Salsa20_salsa20_decrypt(mlen1, m1, c1, subkey, n1, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Salsa20_salsa20_decrypt(mlen1, m1, c1, subkey, n1, 1U);
+    return 0U;
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static void secretbox_easy(uint32_t mlen, uint8_t *c, uint8_t *k, uint8_t *n, uint8_t *m)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   secretbox_detached(mlen, cip, tag, k, n, m);
 }
 
@@ -141,7 +141,7 @@ static uint32_t
 secretbox_open_easy(uint32_t mlen, uint8_t *m, uint8_t *k, uint8_t *n, uint8_t *c)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   return secretbox_open_detached(mlen, m, k, n, cip, tag);
 }
 
@@ -152,9 +152,9 @@ static inline uint32_t box_beforenm(uint8_t *k, uint8_t *pk, uint8_t *sk)
   if (r)
   {
     Hacl_Salsa20_hsalsa20(k, k, n0);
-    return (uint32_t)0U;
+    return 0U;
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static inline uint32_t
@@ -168,7 +168,7 @@ box_detached_afternm(
 )
 {
   secretbox_detached(mlen, c, tag, k, n, m);
-  return (uint32_t)0U;
+  return 0U;
 }
 
 static inline uint32_t
@@ -184,11 +184,11 @@ box_detached(
 {
   uint8_t k[32U] = { 0U };
   uint32_t r = box_beforenm(k, pk, sk);
-  if (r == (uint32_t)0U)
+  if (r == 0U)
   {
     return box_detached_afternm(mlen, c, tag, k, n, m);
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static inline uint32_t
@@ -217,18 +217,18 @@ box_open_detached(
 {
   uint8_t k[32U] = { 0U };
   uint32_t r = box_beforenm(k, pk, sk);
-  if (r == (uint32_t)0U)
+  if (r == 0U)
   {
     return box_open_detached_afternm(mlen, m, k, n, c, tag);
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static inline uint32_t
 box_easy_afternm(uint32_t mlen, uint8_t *c, uint8_t *k, uint8_t *n, uint8_t *m)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   uint32_t res = box_detached_afternm(mlen, cip, tag, k, n, m);
   return res;
 }
@@ -237,7 +237,7 @@ static inline uint32_t
 box_easy(uint32_t mlen, uint8_t *c, uint8_t *sk, uint8_t *pk, uint8_t *n, uint8_t *m)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   uint32_t res = box_detached(mlen, cip, tag, sk, pk, n, m);
   return res;
 }
@@ -246,7 +246,7 @@ static inline uint32_t
 box_open_easy_afternm(uint32_t mlen, uint8_t *m, uint8_t *k, uint8_t *n, uint8_t *c)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   return box_open_detached_afternm(mlen, m, k, n, cip, tag);
 }
 
@@ -254,7 +254,7 @@ static inline uint32_t
 box_open_easy(uint32_t mlen, uint8_t *m, uint8_t *pk, uint8_t *sk, uint8_t *n, uint8_t *c)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   return box_open_detached(mlen, m, pk, sk, n, cip, tag);
 }
 
@@ -281,7 +281,7 @@ Hacl_NaCl_crypto_secretbox_detached(
 )
 {
   secretbox_detached(mlen, c, tag, k, n, m);
-  return (uint32_t)0U;
+  return 0U;
 }
 
 /**
@@ -322,7 +322,7 @@ uint32_t
 Hacl_NaCl_crypto_secretbox_easy(uint8_t *c, uint8_t *m, uint32_t mlen, uint8_t *n, uint8_t *k)
 {
   secretbox_easy(mlen, c, k, n, m);
-  return (uint32_t)0U;
+  return 0U;
 }
 
 /**
@@ -343,7 +343,7 @@ Hacl_NaCl_crypto_secretbox_open_easy(
   uint8_t *k
 )
 {
-  return secretbox_open_easy(clen - (uint32_t)16U, m, k, n, c);
+  return secretbox_open_easy(clen - 16U, m, k, n, c);
 }
 
 /**
@@ -490,7 +490,7 @@ Hacl_NaCl_crypto_box_open_easy_afternm(
   uint8_t *k
 )
 {
-  return box_open_easy_afternm(clen - (uint32_t)16U, m, k, n, c);
+  return box_open_easy_afternm(clen - 16U, m, k, n, c);
 }
 
 /**
@@ -513,6 +513,6 @@ Hacl_NaCl_crypto_box_open_easy(
   uint8_t *sk
 )
 {
-  return box_open_easy(clen - (uint32_t)16U, m, pk, sk, n, c);
+  return box_open_easy(clen - 16U, m, pk, sk, n, c);
 }
 
diff --git a/src/Hacl_P256.c b/src/Hacl_P256.c
index 7e586e54..ed09716d 100644
--- a/src/Hacl_P256.c
+++ b/src/Hacl_P256.c
@@ -33,11 +33,11 @@
 static inline uint64_t bn_is_zero_mask4(uint64_t *f)
 {
   uint64_t bn_zero[4U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(f[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
@@ -48,16 +48,16 @@ static inline uint64_t bn_is_zero_mask4(uint64_t *f)
 static inline bool bn_is_zero_vartime4(uint64_t *f)
 {
   uint64_t m = bn_is_zero_mask4(f);
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static inline uint64_t bn_is_eq_mask4(uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
@@ -67,16 +67,16 @@ static inline uint64_t bn_is_eq_mask4(uint64_t *a, uint64_t *b)
 static inline bool bn_is_eq_vartime4(uint64_t *a, uint64_t *b)
 {
   uint64_t m = bn_is_eq_mask4(a, b);
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static inline void bn_cmovznz4(uint64_t *res, uint64_t cin, uint64_t *x, uint64_t *y)
 {
-  uint64_t mask = ~FStar_UInt64_eq_mask(cin, (uint64_t)0U);
+  uint64_t mask = ~FStar_UInt64_eq_mask(cin, 0ULL);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t uu____0 = x[i];
     uint64_t x1 = uu____0 ^ (mask & (y[i] ^ uu____0));
@@ -85,52 +85,52 @@ static inline void bn_cmovznz4(uint64_t *res, uint64_t cin, uint64_t *x, uint64_
 
 static inline void bn_add_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t *y)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = x[4U * 0U];
+    uint64_t t20 = y[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = x[4U * 0U + 1U];
+    uint64_t t21 = y[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = x[4U * 0U + 2U];
+    uint64_t t22 = y[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = x[4U * 0U + 3U];
+    uint64_t t2 = y[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x1;);
@@ -138,23 +138,23 @@ static inline void bn_add_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t
 
 static inline uint64_t bn_sub4(uint64_t *res, uint64_t *x, uint64_t *y)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = x[4U * 0U];
+    uint64_t t20 = y[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = x[4U * 0U + 1U];
+    uint64_t t21 = y[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = x[4U * 0U + 2U];
+    uint64_t t22 = y[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = x[4U * 0U + 3U];
+    uint64_t t2 = y[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c0 = c;
@@ -163,53 +163,53 @@ static inline uint64_t bn_sub4(uint64_t *res, uint64_t *x, uint64_t *y)
 
 static inline void bn_sub_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t *y)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = x[4U * 0U];
+    uint64_t t20 = y[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = x[4U * 0U + 1U];
+    uint64_t t21 = y[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = x[4U * 0U + 2U];
+    uint64_t t22 = y[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = x[4U * 0U + 3U];
+    uint64_t t2 = y[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x1;);
@@ -217,59 +217,59 @@ static inline void bn_sub_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t
 
 static inline void bn_mul4(uint64_t *res, uint64_t *x, uint64_t *y)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = y[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = x[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = x[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = x[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = x[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = x[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
     uint64_t r = c;
-    res[(uint32_t)4U + i0] = r;);
+    res[4U + i0] = r;);
 }
 
 static inline void bn_sqr4(uint64_t *res, uint64_t *x)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = x;
     uint64_t a_j = x[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -277,41 +277,37 @@ static inline void bn_sqr4(uint64_t *res, uint64_t *x)
     }
     uint64_t r = c;
     res[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(x[i], x[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void bn_to_bytes_be4(uint8_t *res, uint64_t *f)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(res + i * (uint32_t)8U, f[(uint32_t)4U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(res + i * 8U, f[4U - i - 1U]););
 }
 
 static inline void bn_from_bytes_be4(uint64_t *res, uint8_t *b)
 {
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
 }
@@ -319,79 +315,79 @@ static inline void bn_from_bytes_be4(uint64_t *res, uint8_t *b)
 static inline void bn2_to_bytes_be4(uint8_t *res, uint64_t *x, uint64_t *y)
 {
   bn_to_bytes_be4(res, x);
-  bn_to_bytes_be4(res + (uint32_t)32U, y);
+  bn_to_bytes_be4(res + 32U, y);
 }
 
 static inline void make_prime(uint64_t *n)
 {
-  n[0U] = (uint64_t)0xffffffffffffffffU;
-  n[1U] = (uint64_t)0xffffffffU;
-  n[2U] = (uint64_t)0x0U;
-  n[3U] = (uint64_t)0xffffffff00000001U;
+  n[0U] = 0xffffffffffffffffULL;
+  n[1U] = 0xffffffffULL;
+  n[2U] = 0x0ULL;
+  n[3U] = 0xffffffff00000001ULL;
 }
 
 static inline void make_order(uint64_t *n)
 {
-  n[0U] = (uint64_t)0xf3b9cac2fc632551U;
-  n[1U] = (uint64_t)0xbce6faada7179e84U;
-  n[2U] = (uint64_t)0xffffffffffffffffU;
-  n[3U] = (uint64_t)0xffffffff00000000U;
+  n[0U] = 0xf3b9cac2fc632551ULL;
+  n[1U] = 0xbce6faada7179e84ULL;
+  n[2U] = 0xffffffffffffffffULL;
+  n[3U] = 0xffffffff00000000ULL;
 }
 
 static inline void make_a_coeff(uint64_t *a)
 {
-  a[0U] = (uint64_t)0xfffffffffffffffcU;
-  a[1U] = (uint64_t)0x3ffffffffU;
-  a[2U] = (uint64_t)0x0U;
-  a[3U] = (uint64_t)0xfffffffc00000004U;
+  a[0U] = 0xfffffffffffffffcULL;
+  a[1U] = 0x3ffffffffULL;
+  a[2U] = 0x0ULL;
+  a[3U] = 0xfffffffc00000004ULL;
 }
 
 static inline void make_b_coeff(uint64_t *b)
 {
-  b[0U] = (uint64_t)0xd89cdf6229c4bddfU;
-  b[1U] = (uint64_t)0xacf005cd78843090U;
-  b[2U] = (uint64_t)0xe5a220abf7212ed6U;
-  b[3U] = (uint64_t)0xdc30061d04874834U;
+  b[0U] = 0xd89cdf6229c4bddfULL;
+  b[1U] = 0xacf005cd78843090ULL;
+  b[2U] = 0xe5a220abf7212ed6ULL;
+  b[3U] = 0xdc30061d04874834ULL;
 }
 
 static inline void make_g_x(uint64_t *n)
 {
-  n[0U] = (uint64_t)0x79e730d418a9143cU;
-  n[1U] = (uint64_t)0x75ba95fc5fedb601U;
-  n[2U] = (uint64_t)0x79fb732b77622510U;
-  n[3U] = (uint64_t)0x18905f76a53755c6U;
+  n[0U] = 0x79e730d418a9143cULL;
+  n[1U] = 0x75ba95fc5fedb601ULL;
+  n[2U] = 0x79fb732b77622510ULL;
+  n[3U] = 0x18905f76a53755c6ULL;
 }
 
 static inline void make_g_y(uint64_t *n)
 {
-  n[0U] = (uint64_t)0xddf25357ce95560aU;
-  n[1U] = (uint64_t)0x8b4ab8e4ba19e45cU;
-  n[2U] = (uint64_t)0xd2e88688dd21f325U;
-  n[3U] = (uint64_t)0x8571ff1825885d85U;
+  n[0U] = 0xddf25357ce95560aULL;
+  n[1U] = 0x8b4ab8e4ba19e45cULL;
+  n[2U] = 0xd2e88688dd21f325ULL;
+  n[3U] = 0x8571ff1825885d85ULL;
 }
 
 static inline void make_fmont_R2(uint64_t *n)
 {
-  n[0U] = (uint64_t)0x3U;
-  n[1U] = (uint64_t)0xfffffffbffffffffU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0x4fffffffdU;
+  n[0U] = 0x3ULL;
+  n[1U] = 0xfffffffbffffffffULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0x4fffffffdULL;
 }
 
 static inline void make_fzero(uint64_t *n)
 {
-  n[0U] = (uint64_t)0U;
-  n[1U] = (uint64_t)0U;
-  n[2U] = (uint64_t)0U;
-  n[3U] = (uint64_t)0U;
+  n[0U] = 0ULL;
+  n[1U] = 0ULL;
+  n[2U] = 0ULL;
+  n[3U] = 0ULL;
 }
 
 static inline void make_fone(uint64_t *n)
 {
-  n[0U] = (uint64_t)0x1U;
-  n[1U] = (uint64_t)0xffffffff00000000U;
-  n[2U] = (uint64_t)0xffffffffffffffffU;
-  n[3U] = (uint64_t)0xfffffffeU;
+  n[0U] = 0x1ULL;
+  n[1U] = 0xffffffff00000000ULL;
+  n[2U] = 0xffffffffffffffffULL;
+  n[3U] = 0xfffffffeULL;
 }
 
 static inline uint64_t bn_is_lt_prime_mask4(uint64_t *f)
@@ -399,7 +395,7 @@ static inline uint64_t bn_is_lt_prime_mask4(uint64_t *f)
   uint64_t tmp[4U] = { 0U };
   make_prime(tmp);
   uint64_t c = bn_sub4(tmp, f, tmp);
-  return (uint64_t)0U - c;
+  return 0ULL - c;
 }
 
 static inline uint64_t feq_mask(uint64_t *a, uint64_t *b)
@@ -435,61 +431,61 @@ static inline void mont_reduction(uint64_t *res, uint64_t *x)
 {
   uint64_t n[4U] = { 0U };
   make_prime(n);
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t qj = (uint64_t)1U * x[i0];
+    0U,
+    4U,
+    1U,
+    uint64_t qj = 1ULL * x[i0];
     uint64_t *res_j0 = x + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
     }
     uint64_t r = c;
     uint64_t c1 = r;
-    uint64_t *resb = x + (uint32_t)4U + i0;
-    uint64_t res_j = x[(uint32_t)4U + i0];
+    uint64_t *resb = x + 4U + i0;
+    uint64_t res_j = x[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););
-  memcpy(res, x + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, x + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x1;);
@@ -512,7 +508,7 @@ static inline void fsqr0(uint64_t *res, uint64_t *x)
 static inline void from_mont(uint64_t *res, uint64_t *a)
 {
   uint64_t tmp[8U] = { 0U };
-  memcpy(tmp, a, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, a, 4U * sizeof (uint64_t));
   mont_reduction(res, tmp);
 }
 
@@ -540,105 +536,105 @@ static inline void finv(uint64_t *res, uint64_t *a)
 {
   uint64_t tmp[16U] = { 0U };
   uint64_t *x30 = tmp;
-  uint64_t *x2 = tmp + (uint32_t)4U;
-  uint64_t *tmp1 = tmp + (uint32_t)8U;
-  uint64_t *tmp2 = tmp + (uint32_t)12U;
-  memcpy(x2, a, (uint32_t)4U * sizeof (uint64_t));
+  uint64_t *x2 = tmp + 4U;
+  uint64_t *tmp1 = tmp + 8U;
+  uint64_t *tmp2 = tmp + 12U;
+  memcpy(x2, a, 4U * sizeof (uint64_t));
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, a);
-  memcpy(x30, x2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x30, x2, 4U * sizeof (uint64_t));
   {
     fsqr0(x30, x30);
   }
   fmul0(x30, x30, a);
-  memcpy(tmp1, x30, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, x30, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, x30);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, fsqr0(tmp2, tmp2););
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, fsqr0(tmp2, tmp2););
   fmul0(tmp2, tmp2, tmp1);
-  memcpy(tmp1, tmp2, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, tmp2, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, x30);
-  memcpy(x30, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR15(i, (uint32_t)0U, (uint32_t)15U, (uint32_t)1U, fsqr0(x30, x30););
+  memcpy(x30, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR15(i, 0U, 15U, 1U, fsqr0(x30, x30););
   fmul0(x30, x30, tmp1);
-  memcpy(tmp1, x30, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, x30, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, x2);
-  memcpy(x2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  memcpy(x2, tmp1, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, a);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, tmp1);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, tmp1);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)30U; i++)
+  for (uint32_t i = 0U; i < 30U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, x30);
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(x2, x2););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(x2, x2););
   fmul0(tmp1, x2, a);
-  memcpy(res, tmp1, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, tmp1, 4U * sizeof (uint64_t));
 }
 
 static inline void fsqrt(uint64_t *res, uint64_t *a)
 {
   uint64_t tmp[8U] = { 0U };
   uint64_t *tmp1 = tmp;
-  uint64_t *tmp2 = tmp + (uint32_t)4U;
-  memcpy(tmp1, a, (uint32_t)4U * sizeof (uint64_t));
+  uint64_t *tmp2 = tmp + 4U;
+  memcpy(tmp1, a, 4U * sizeof (uint64_t));
   {
     fsqr0(tmp1, tmp1);
   }
   fmul0(tmp1, tmp1, a);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(tmp2, tmp2););
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(tmp2, tmp2););
   fmul0(tmp2, tmp2, tmp1);
-  memcpy(tmp1, tmp2, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, tmp2, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, tmp2);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, fsqr0(tmp2, tmp2););
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, fsqr0(tmp2, tmp2););
   fmul0(tmp2, tmp2, tmp1);
-  memcpy(tmp1, tmp2, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR16(i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, tmp2, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, tmp2);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     fsqr0(tmp2, tmp2);
   }
   fmul0(tmp2, tmp2, a);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)96U; i++)
+  for (uint32_t i = 0U; i < 96U; i++)
   {
     fsqr0(tmp2, tmp2);
   }
   fmul0(tmp2, tmp2, a);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)94U; i++)
+  for (uint32_t i = 0U; i < 94U; i++)
   {
     fsqr0(tmp2, tmp2);
   }
-  memcpy(res, tmp2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, tmp2, 4U * sizeof (uint64_t));
 }
 
 static inline void make_base_point(uint64_t *p)
 {
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)4U;
-  uint64_t *z = p + (uint32_t)8U;
+  uint64_t *y = p + 4U;
+  uint64_t *z = p + 8U;
   make_g_x(x);
   make_g_y(y);
   make_fone(z);
@@ -647,8 +643,8 @@ static inline void make_base_point(uint64_t *p)
 static inline void make_point_at_inf(uint64_t *p)
 {
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)4U;
-  uint64_t *z = p + (uint32_t)8U;
+  uint64_t *y = p + 4U;
+  uint64_t *z = p + 8U;
   make_fzero(x);
   make_fone(y);
   make_fzero(z);
@@ -656,7 +652,7 @@ static inline void make_point_at_inf(uint64_t *p)
 
 static inline bool is_point_at_inf_vartime(uint64_t *p)
 {
-  uint64_t *pz = p + (uint32_t)8U;
+  uint64_t *pz = p + 8U;
   return bn_is_zero_vartime4(pz);
 }
 
@@ -664,10 +660,10 @@ static inline void to_aff_point(uint64_t *res, uint64_t *p)
 {
   uint64_t zinv[4U] = { 0U };
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
-  uint64_t *pz = p + (uint32_t)8U;
+  uint64_t *py = p + 4U;
+  uint64_t *pz = p + 8U;
   uint64_t *x = res;
-  uint64_t *y = res + (uint32_t)4U;
+  uint64_t *y = res + 4U;
   finv(zinv, pz);
   fmul0(x, px, zinv);
   fmul0(y, py, zinv);
@@ -679,7 +675,7 @@ static inline void to_aff_point_x(uint64_t *res, uint64_t *p)
 {
   uint64_t zinv[4U] = { 0U };
   uint64_t *px = p;
-  uint64_t *pz = p + (uint32_t)8U;
+  uint64_t *pz = p + 8U;
   finv(zinv, pz);
   fmul0(res, px, zinv);
   from_mont(res, res);
@@ -688,10 +684,10 @@ static inline void to_aff_point_x(uint64_t *res, uint64_t *p)
 static inline void to_proj_point(uint64_t *res, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   uint64_t *rx = res;
-  uint64_t *ry = res + (uint32_t)4U;
-  uint64_t *rz = res + (uint32_t)8U;
+  uint64_t *ry = res + 4U;
+  uint64_t *rz = res + 8U;
   to_mont(rx, px);
   to_mont(ry, py);
   make_fone(rz);
@@ -703,7 +699,7 @@ static inline bool is_on_curve_vartime(uint64_t *p)
   uint64_t tx[4U] = { 0U };
   uint64_t ty[4U] = { 0U };
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   to_mont(tx, px);
   to_mont(ty, py);
   uint64_t tmp[4U] = { 0U };
@@ -715,14 +711,14 @@ static inline bool is_on_curve_vartime(uint64_t *p)
   fadd0(rp, tmp, rp);
   fsqr0(ty, ty);
   uint64_t r = feq_mask(ty, rp);
-  bool r0 = r == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool r0 = r == 0xFFFFFFFFFFFFFFFFULL;
   return r0;
 }
 
 static inline void aff_point_store(uint8_t *res, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   bn2_to_bytes_be4(res, px, py);
 }
 
@@ -736,17 +732,17 @@ static inline void point_store(uint8_t *res, uint64_t *p)
 static inline bool aff_point_load_vartime(uint64_t *p, uint8_t *b)
 {
   uint8_t *p_x = b;
-  uint8_t *p_y = b + (uint32_t)32U;
+  uint8_t *p_y = b + 32U;
   uint64_t *bn_p_x = p;
-  uint64_t *bn_p_y = p + (uint32_t)4U;
+  uint64_t *bn_p_y = p + 4U;
   bn_from_bytes_be4(bn_p_x, p_x);
   bn_from_bytes_be4(bn_p_y, p_y);
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   uint64_t lessX = bn_is_lt_prime_mask4(px);
   uint64_t lessY = bn_is_lt_prime_mask4(py);
   uint64_t res = lessX & lessY;
-  bool is_xy_valid = res == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool is_xy_valid = res == 0xFFFFFFFFFFFFFFFFULL;
   if (!is_xy_valid)
   {
     return false;
@@ -769,15 +765,15 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
 {
   uint8_t s0 = s[0U];
   uint8_t s01 = s0;
-  if (!(s01 == (uint8_t)0x02U || s01 == (uint8_t)0x03U))
+  if (!(s01 == 0x02U || s01 == 0x03U))
   {
     return false;
   }
-  uint8_t *xb = s + (uint32_t)1U;
+  uint8_t *xb = s + 1U;
   bn_from_bytes_be4(x, xb);
   uint64_t is_x_valid = bn_is_lt_prime_mask4(x);
-  bool is_x_valid1 = is_x_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  bool is_y_odd = s01 == (uint8_t)0x03U;
+  bool is_x_valid1 = is_x_valid == 0xFFFFFFFFFFFFFFFFULL;
+  bool is_y_odd = s01 == 0x03U;
   if (!is_x_valid1)
   {
     return false;
@@ -797,14 +793,14 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
   from_mont(y, yM);
   fsqr0(yM, yM);
   uint64_t r = feq_mask(yM, y2M);
-  bool is_y_valid = r == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool is_y_valid = r == 0xFFFFFFFFFFFFFFFFULL;
   bool is_y_valid0 = is_y_valid;
   if (!is_y_valid0)
   {
     return false;
   }
-  uint64_t is_y_odd1 = y[0U] & (uint64_t)1U;
-  bool is_y_odd2 = is_y_odd1 == (uint64_t)1U;
+  uint64_t is_y_odd1 = y[0U] & 1ULL;
+  bool is_y_odd2 = is_y_odd1 == 1ULL;
   fnegate_conditional_vartime(y, is_y_odd2 != is_y_odd);
   return true;
 }
@@ -813,18 +809,18 @@ static inline void point_double(uint64_t *res, uint64_t *p)
 {
   uint64_t tmp[20U] = { 0U };
   uint64_t *x = p;
-  uint64_t *z = p + (uint32_t)8U;
+  uint64_t *z = p + 8U;
   uint64_t *x3 = res;
-  uint64_t *y3 = res + (uint32_t)4U;
-  uint64_t *z3 = res + (uint32_t)8U;
+  uint64_t *y3 = res + 4U;
+  uint64_t *z3 = res + 8U;
   uint64_t *t0 = tmp;
-  uint64_t *t1 = tmp + (uint32_t)4U;
-  uint64_t *t2 = tmp + (uint32_t)8U;
-  uint64_t *t3 = tmp + (uint32_t)12U;
-  uint64_t *t4 = tmp + (uint32_t)16U;
+  uint64_t *t1 = tmp + 4U;
+  uint64_t *t2 = tmp + 8U;
+  uint64_t *t3 = tmp + 12U;
+  uint64_t *t4 = tmp + 16U;
   uint64_t *x1 = p;
-  uint64_t *y = p + (uint32_t)4U;
-  uint64_t *z1 = p + (uint32_t)8U;
+  uint64_t *y = p + 4U;
+  uint64_t *z1 = p + 8U;
   fsqr0(t0, x1);
   fsqr0(t1, y);
   fsqr0(t2, z1);
@@ -865,22 +861,22 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
 {
   uint64_t tmp[36U] = { 0U };
   uint64_t *t0 = tmp;
-  uint64_t *t1 = tmp + (uint32_t)24U;
+  uint64_t *t1 = tmp + 24U;
   uint64_t *x3 = t1;
-  uint64_t *y3 = t1 + (uint32_t)4U;
-  uint64_t *z3 = t1 + (uint32_t)8U;
+  uint64_t *y3 = t1 + 4U;
+  uint64_t *z3 = t1 + 8U;
   uint64_t *t01 = t0;
-  uint64_t *t11 = t0 + (uint32_t)4U;
-  uint64_t *t2 = t0 + (uint32_t)8U;
-  uint64_t *t3 = t0 + (uint32_t)12U;
-  uint64_t *t4 = t0 + (uint32_t)16U;
-  uint64_t *t5 = t0 + (uint32_t)20U;
+  uint64_t *t11 = t0 + 4U;
+  uint64_t *t2 = t0 + 8U;
+  uint64_t *t3 = t0 + 12U;
+  uint64_t *t4 = t0 + 16U;
+  uint64_t *t5 = t0 + 20U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)4U;
-  uint64_t *z10 = p + (uint32_t)8U;
+  uint64_t *y1 = p + 4U;
+  uint64_t *z10 = p + 8U;
   uint64_t *x20 = q;
-  uint64_t *y20 = q + (uint32_t)4U;
-  uint64_t *z20 = q + (uint32_t)8U;
+  uint64_t *y20 = q + 4U;
+  uint64_t *z20 = q + 8U;
   fmul0(t01, x1, x20);
   fmul0(t11, y1, y20);
   fmul0(t2, z10, z20);
@@ -888,10 +884,10 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
   fadd0(t4, x20, y20);
   fmul0(t3, t3, t4);
   fadd0(t4, t01, t11);
-  uint64_t *y10 = p + (uint32_t)4U;
-  uint64_t *z11 = p + (uint32_t)8U;
-  uint64_t *y2 = q + (uint32_t)4U;
-  uint64_t *z21 = q + (uint32_t)8U;
+  uint64_t *y10 = p + 4U;
+  uint64_t *z11 = p + 8U;
+  uint64_t *y2 = q + 4U;
+  uint64_t *z21 = q + 8U;
   fsub0(t3, t3, t4);
   fadd0(t4, y10, z11);
   fadd0(t5, y2, z21);
@@ -899,9 +895,9 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
   fadd0(t5, t11, t2);
   fsub0(t4, t4, t5);
   uint64_t *x10 = p;
-  uint64_t *z1 = p + (uint32_t)8U;
+  uint64_t *z1 = p + 8U;
   uint64_t *x2 = q;
-  uint64_t *z2 = q + (uint32_t)8U;
+  uint64_t *z2 = q + 8U;
   fadd0(x3, x10, z1);
   fadd0(y3, x2, z2);
   fmul0(x3, x3, y3);
@@ -932,7 +928,7 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
   fmul0(z3, t4, z3);
   fmul0(t11, t3, t01);
   fadd0(z3, z3, t11);
-  memcpy(res, t1, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(res, t1, 12U * sizeof (uint64_t));
 }
 
 static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)
@@ -940,41 +936,37 @@ static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)
   uint64_t table[192U] = { 0U };
   uint64_t tmp[12U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)12U;
+  uint64_t *t1 = table + 12U;
   make_point_at_inf(t0);
-  memcpy(t1, p, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(t1, p, 12U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)12U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 12U;
     point_double(tmp, t11);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U;
+    memcpy(table + (2U * i + 2U) * 12U, tmp, 12U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 12U;
     point_add(tmp, p, t2);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t)););
+    memcpy(table + (2U * i + 3U) * 12U, tmp, 12U * sizeof (uint64_t)););
   make_point_at_inf(res);
   uint64_t tmp0[12U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
-    KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, point_double(res, res););
-    uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)12U * sizeof (uint64_t));
+    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, point_double(res, res););
+    uint32_t k = 256U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 12U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)12U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 12U;
       KRML_MAYBE_FOR12(i,
-        (uint32_t)0U,
-        (uint32_t)12U,
-        (uint32_t)1U,
+        0U,
+        12U,
+        1U,
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
@@ -984,17 +976,17 @@ static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)
 
 static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
 {
-  memcpy(tmp, (uint64_t *)table, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(tmp, (uint64_t *)table, 12U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i0,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
-    const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)12U;
+    0U,
+    15U,
+    1U,
+    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + 1U));
+    const uint64_t *res_j = table + (i0 + 1U) * 12U;
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint64_t *os = tmp;
       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
       os[i] = x;););
@@ -1007,64 +999,58 @@ static inline void point_mul_g(uint64_t *res, uint64_t *scalar)
   uint64_t
   q2[12U] =
     {
-      (uint64_t)1499621593102562565U, (uint64_t)16692369783039433128U,
-      (uint64_t)15337520135922861848U, (uint64_t)5455737214495366228U,
-      (uint64_t)17827017231032529600U, (uint64_t)12413621606240782649U,
-      (uint64_t)2290483008028286132U, (uint64_t)15752017553340844820U,
-      (uint64_t)4846430910634234874U, (uint64_t)10861682798464583253U,
-      (uint64_t)15404737222404363049U, (uint64_t)363586619281562022U
+      1499621593102562565ULL, 16692369783039433128ULL, 15337520135922861848ULL,
+      5455737214495366228ULL, 17827017231032529600ULL, 12413621606240782649ULL,
+      2290483008028286132ULL, 15752017553340844820ULL, 4846430910634234874ULL,
+      10861682798464583253ULL, 15404737222404363049ULL, 363586619281562022ULL
     };
   uint64_t
   q3[12U] =
     {
-      (uint64_t)14619254753077084366U, (uint64_t)13913835116514008593U,
-      (uint64_t)15060744674088488145U, (uint64_t)17668414598203068685U,
-      (uint64_t)10761169236902342334U, (uint64_t)15467027479157446221U,
-      (uint64_t)14989185522423469618U, (uint64_t)14354539272510107003U,
-      (uint64_t)14298211796392133693U, (uint64_t)13270323784253711450U,
-      (uint64_t)13380964971965046957U, (uint64_t)8686204248456909699U
+      14619254753077084366ULL, 13913835116514008593ULL, 15060744674088488145ULL,
+      17668414598203068685ULL, 10761169236902342334ULL, 15467027479157446221ULL,
+      14989185522423469618ULL, 14354539272510107003ULL, 14298211796392133693ULL,
+      13270323784253711450ULL, 13380964971965046957ULL, 8686204248456909699ULL
     };
   uint64_t
   q4[12U] =
     {
-      (uint64_t)7870395003430845958U, (uint64_t)18001862936410067720U,
-      (uint64_t)8006461232116967215U, (uint64_t)5921313779532424762U,
-      (uint64_t)10702113371959864307U, (uint64_t)8070517410642379879U,
-      (uint64_t)7139806720777708306U, (uint64_t)8253938546650739833U,
-      (uint64_t)17490482834545705718U, (uint64_t)1065249776797037500U,
-      (uint64_t)5018258455937968775U, (uint64_t)14100621120178668337U
+      7870395003430845958ULL, 18001862936410067720ULL, 8006461232116967215ULL,
+      5921313779532424762ULL, 10702113371959864307ULL, 8070517410642379879ULL,
+      7139806720777708306ULL, 8253938546650739833ULL, 17490482834545705718ULL,
+      1065249776797037500ULL, 5018258455937968775ULL, 14100621120178668337ULL
     };
   uint64_t *r1 = scalar;
-  uint64_t *r2 = scalar + (uint32_t)1U;
-  uint64_t *r3 = scalar + (uint32_t)2U;
-  uint64_t *r4 = scalar + (uint32_t)3U;
+  uint64_t *r2 = scalar + 1U;
+  uint64_t *r3 = scalar + 2U;
+  uint64_t *r4 = scalar + 3U;
   make_point_at_inf(res);
   uint64_t tmp[12U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    KRML_MAYBE_FOR4(i0, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, point_double(res, res););
-    uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
+    0U,
+    16U,
+    1U,
+    KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, point_double(res, res););
+    uint32_t k = 64U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
     point_add(res, res, tmp);
-    uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
+    uint32_t k0 = 64U - 4U * i - 4U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
     point_add(res, res, tmp);
-    uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
+    uint32_t k1 = 64U - 4U * i - 4U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
     point_add(res, res, tmp);
-    uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
+    uint32_t k2 = 64U - 4U * i - 4U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
     point_add(res, res, tmp););
-  KRML_HOST_IGNORE(q1);
-  KRML_HOST_IGNORE(q2);
-  KRML_HOST_IGNORE(q3);
-  KRML_HOST_IGNORE(q4);
+  KRML_MAYBE_UNUSED_VAR(q1);
+  KRML_MAYBE_UNUSED_VAR(q2);
+  KRML_MAYBE_UNUSED_VAR(q3);
+  KRML_MAYBE_UNUSED_VAR(q4);
 }
 
 static inline void
@@ -1075,54 +1061,48 @@ point_mul_double_g(uint64_t *res, uint64_t *scalar1, uint64_t *scalar2, uint64_t
   uint64_t table2[384U] = { 0U };
   uint64_t tmp[12U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)12U;
+  uint64_t *t1 = table2 + 12U;
   make_point_at_inf(t0);
-  memcpy(t1, q2, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(t1, q2, 12U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)12U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 12U;
     point_double(tmp, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U;
+    memcpy(table2 + (2U * i + 2U) * 12U, tmp, 12U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 12U;
     point_add(tmp, q2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 12U, tmp, 12U * sizeof (uint64_t)););
   uint64_t tmp0[12U] = { 0U };
-  uint32_t i0 = (uint32_t)255U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, i0, (uint32_t)5U);
+  uint32_t i0 = 255U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
-  const
-  uint64_t
-  *a_bits_l = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)12U;
-  memcpy(res, (uint64_t *)a_bits_l, (uint32_t)12U * sizeof (uint64_t));
-  uint32_t i1 = (uint32_t)255U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, i1, (uint32_t)5U);
+  const uint64_t *a_bits_l = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 12U;
+  memcpy(res, (uint64_t *)a_bits_l, 12U * sizeof (uint64_t));
+  uint32_t i1 = 255U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
-  const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)12U;
-  memcpy(tmp0, (uint64_t *)a_bits_l0, (uint32_t)12U * sizeof (uint64_t));
+  const uint64_t *a_bits_l0 = table2 + bits_l320 * 12U;
+  memcpy(tmp0, (uint64_t *)a_bits_l0, 12U * sizeof (uint64_t));
   point_add(res, res, tmp0);
   uint64_t tmp1[12U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++)
+  for (uint32_t i = 0U; i < 51U; i++)
   {
-    KRML_MAYBE_FOR5(i2, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, point_double(res, res););
-    uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, point_double(res, res););
+    uint32_t k = 255U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, k, 5U);
     uint32_t bits_l321 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)12U;
-    memcpy(tmp1, (uint64_t *)a_bits_l1, (uint32_t)12U * sizeof (uint64_t));
+    const uint64_t *a_bits_l1 = table2 + bits_l321 * 12U;
+    memcpy(tmp1, (uint64_t *)a_bits_l1, 12U * sizeof (uint64_t));
     point_add(res, res, tmp1);
-    uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, k0, (uint32_t)5U);
+    uint32_t k0 = 255U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, k0, 5U);
     uint32_t bits_l322 = (uint32_t)bits_l0;
     const
     uint64_t
-    *a_bits_l2 = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)12U;
-    memcpy(tmp1, (uint64_t *)a_bits_l2, (uint32_t)12U * sizeof (uint64_t));
+    *a_bits_l2 = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 12U;
+    memcpy(tmp1, (uint64_t *)a_bits_l2, 12U * sizeof (uint64_t));
     point_add(res, res, tmp1);
   }
 }
@@ -1132,7 +1112,7 @@ static inline uint64_t bn_is_lt_order_mask4(uint64_t *f)
   uint64_t tmp[4U] = { 0U };
   make_order(tmp);
   uint64_t c = bn_sub4(tmp, f, tmp);
-  return (uint64_t)0U - c;
+  return 0ULL - c;
 }
 
 static inline uint64_t bn_is_lt_order_and_gt_zero_mask4(uint64_t *f)
@@ -1161,61 +1141,61 @@ static inline void qmont_reduction(uint64_t *res, uint64_t *x)
 {
   uint64_t n[4U] = { 0U };
   make_order(n);
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t qj = (uint64_t)0xccd1c8aaee00bc4fU * x[i0];
+    0U,
+    4U,
+    1U,
+    uint64_t qj = 0xccd1c8aaee00bc4fULL * x[i0];
     uint64_t *res_j0 = x + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
     }
     uint64_t r = c;
     uint64_t c1 = r;
-    uint64_t *resb = x + (uint32_t)4U + i0;
-    uint64_t res_j = x[(uint32_t)4U + i0];
+    uint64_t *resb = x + 4U + i0;
+    uint64_t res_j = x[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););
-  memcpy(res, x + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, x + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x1;);
@@ -1224,7 +1204,7 @@ static inline void qmont_reduction(uint64_t *res, uint64_t *x)
 static inline void from_qmont(uint64_t *res, uint64_t *x)
 {
   uint64_t tmp[8U] = { 0U };
-  memcpy(tmp, x, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, x, 4U * sizeof (uint64_t));
   qmont_reduction(res, tmp);
 }
 
@@ -1246,18 +1226,18 @@ bool Hacl_Impl_P256_DH_ecp256dh_i(uint8_t *public_key, uint8_t *private_key)
 {
   uint64_t tmp[16U] = { 0U };
   uint64_t *sk = tmp;
-  uint64_t *pk = tmp + (uint32_t)4U;
+  uint64_t *pk = tmp + 4U;
   bn_from_bytes_be4(sk, private_key);
   uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(sk);
   uint64_t oneq[4U] = { 0U };
-  oneq[0U] = (uint64_t)1U;
-  oneq[1U] = (uint64_t)0U;
-  oneq[2U] = (uint64_t)0U;
-  oneq[3U] = (uint64_t)0U;
+  oneq[0U] = 1ULL;
+  oneq[1U] = 0ULL;
+  oneq[2U] = 0ULL;
+  oneq[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -1265,7 +1245,7 @@ bool Hacl_Impl_P256_DH_ecp256dh_i(uint8_t *public_key, uint8_t *private_key)
   uint64_t is_sk_valid = is_b_valid;
   point_mul_g(pk, sk);
   point_store(public_key, pk);
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 bool
@@ -1277,19 +1257,19 @@ Hacl_Impl_P256_DH_ecp256dh_r(
 {
   uint64_t tmp[16U] = { 0U };
   uint64_t *sk = tmp;
-  uint64_t *pk = tmp + (uint32_t)4U;
+  uint64_t *pk = tmp + 4U;
   bool is_pk_valid = load_point_vartime(pk, their_pubkey);
   bn_from_bytes_be4(sk, private_key);
   uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(sk);
   uint64_t oneq[4U] = { 0U };
-  oneq[0U] = (uint64_t)1U;
-  oneq[1U] = (uint64_t)0U;
-  oneq[2U] = (uint64_t)0U;
-  oneq[3U] = (uint64_t)0U;
+  oneq[0U] = 1ULL;
+  oneq[1U] = 0ULL;
+  oneq[2U] = 0ULL;
+  oneq[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -1301,27 +1281,27 @@ Hacl_Impl_P256_DH_ecp256dh_r(
     point_mul(ss_proj, sk, pk);
     point_store(shared_secret, ss_proj);
   }
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU && is_pk_valid;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL && is_pk_valid;
 }
 
 static inline void qinv(uint64_t *res, uint64_t *r)
 {
   uint64_t tmp[28U] = { 0U };
   uint64_t *x6 = tmp;
-  uint64_t *x_11 = tmp + (uint32_t)4U;
-  uint64_t *x_101 = tmp + (uint32_t)8U;
-  uint64_t *x_111 = tmp + (uint32_t)12U;
-  uint64_t *x_1111 = tmp + (uint32_t)16U;
-  uint64_t *x_10101 = tmp + (uint32_t)20U;
-  uint64_t *x_101111 = tmp + (uint32_t)24U;
-  memcpy(x6, r, (uint32_t)4U * sizeof (uint64_t));
+  uint64_t *x_11 = tmp + 4U;
+  uint64_t *x_101 = tmp + 8U;
+  uint64_t *x_111 = tmp + 12U;
+  uint64_t *x_1111 = tmp + 16U;
+  uint64_t *x_10101 = tmp + 20U;
+  uint64_t *x_101111 = tmp + 24U;
+  memcpy(x6, r, 4U * sizeof (uint64_t));
   {
     qsqr(x6, x6);
   }
   qmul(x_11, x6, r);
   qmul(x_101, x6, x_11);
   qmul(x_111, x6, x_101);
-  memcpy(x6, x_101, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x6, x_101, 4U * sizeof (uint64_t));
   {
     qsqr(x6, x6);
   }
@@ -1330,86 +1310,86 @@ static inline void qinv(uint64_t *res, uint64_t *r)
     qsqr(x6, x6);
   }
   qmul(x_10101, x6, r);
-  memcpy(x6, x_10101, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x6, x_10101, 4U * sizeof (uint64_t));
   {
     qsqr(x6, x6);
   }
   qmul(x_101111, x_101, x6);
   qmul(x6, x_10101, x6);
   uint64_t tmp1[4U] = { 0U };
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(x6, x6););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(x6, x6););
   qmul(x6, x6, x_11);
-  memcpy(tmp1, x6, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  memcpy(tmp1, x6, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x6);
-  memcpy(x6, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR16(i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, qsqr(x6, x6););
+  memcpy(x6, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, qsqr(x6, x6););
   qmul(x6, x6, tmp1);
-  memcpy(tmp1, x6, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memcpy(tmp1, x6, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     qsqr(tmp1, tmp1);
   }
   qmul(tmp1, tmp1, x6);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     qsqr(tmp1, tmp1);
   }
   qmul(tmp1, tmp1, x6);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_10101);
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR9(i, (uint32_t)0U, (uint32_t)9U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR9(i, 0U, 9U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101111);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, r);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, r);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR10(i, (uint32_t)0U, (uint32_t)10U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR10(i, 0U, 10U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101111);
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, r);
-  KRML_MAYBE_FOR7(i, (uint32_t)0U, (uint32_t)7U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR7(i, 0U, 7U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_10101);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  memcpy(x6, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  memcpy(res, x6, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x6, tmp1, 4U * sizeof (uint64_t));
+  memcpy(res, x6, 4U * sizeof (uint64_t));
 }
 
 static inline void qmul_mont(uint64_t *sinv, uint64_t *b, uint64_t *res)
@@ -1429,20 +1409,16 @@ ecdsa_verify_msg_as_qelem(
 {
   uint64_t tmp[28U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *r_q = tmp + (uint32_t)12U;
-  uint64_t *s_q = tmp + (uint32_t)16U;
-  uint64_t *u1 = tmp + (uint32_t)20U;
-  uint64_t *u2 = tmp + (uint32_t)24U;
+  uint64_t *r_q = tmp + 12U;
+  uint64_t *s_q = tmp + 16U;
+  uint64_t *u1 = tmp + 20U;
+  uint64_t *u2 = tmp + 24U;
   bool is_pk_valid = load_point_vartime(pk, public_key);
   bn_from_bytes_be4(r_q, signature_r);
   bn_from_bytes_be4(s_q, signature_s);
   uint64_t is_r_valid = bn_is_lt_order_and_gt_zero_mask4(r_q);
   uint64_t is_s_valid = bn_is_lt_order_and_gt_zero_mask4(s_q);
-  bool
-  is_rs_valid =
-    is_r_valid
-    == (uint64_t)0xFFFFFFFFFFFFFFFFU
-    && is_s_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool is_rs_valid = is_r_valid == 0xFFFFFFFFFFFFFFFFULL && is_s_valid == 0xFFFFFFFFFFFFFFFFULL;
   if (!(is_pk_valid && is_rs_valid))
   {
     return false;
@@ -1474,20 +1450,20 @@ ecdsa_sign_msg_as_qelem(
 {
   uint64_t rsdk_q[16U] = { 0U };
   uint64_t *r_q = rsdk_q;
-  uint64_t *s_q = rsdk_q + (uint32_t)4U;
-  uint64_t *d_a = rsdk_q + (uint32_t)8U;
-  uint64_t *k_q = rsdk_q + (uint32_t)12U;
+  uint64_t *s_q = rsdk_q + 4U;
+  uint64_t *d_a = rsdk_q + 8U;
+  uint64_t *k_q = rsdk_q + 12U;
   bn_from_bytes_be4(d_a, private_key);
   uint64_t is_b_valid0 = bn_is_lt_order_and_gt_zero_mask4(d_a);
   uint64_t oneq0[4U] = { 0U };
-  oneq0[0U] = (uint64_t)1U;
-  oneq0[1U] = (uint64_t)0U;
-  oneq0[2U] = (uint64_t)0U;
-  oneq0[3U] = (uint64_t)0U;
+  oneq0[0U] = 1ULL;
+  oneq0[1U] = 0ULL;
+  oneq0[2U] = 0ULL;
+  oneq0[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = d_a;
     uint64_t uu____0 = oneq0[i];
     uint64_t x = uu____0 ^ (is_b_valid0 & (d_a[i] ^ uu____0));
@@ -1496,14 +1472,14 @@ ecdsa_sign_msg_as_qelem(
   bn_from_bytes_be4(k_q, nonce);
   uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(k_q);
   uint64_t oneq[4U] = { 0U };
-  oneq[0U] = (uint64_t)1U;
-  oneq[1U] = (uint64_t)0U;
-  oneq[2U] = (uint64_t)0U;
-  oneq[3U] = (uint64_t)0U;
+  oneq[0U] = 1ULL;
+  oneq[1U] = 0ULL;
+  oneq[2U] = 0ULL;
+  oneq[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = k_q;
     uint64_t uu____1 = oneq[i];
     uint64_t x = uu____1 ^ (is_b_valid & (k_q[i] ^ uu____1));
@@ -1524,7 +1500,7 @@ ecdsa_sign_msg_as_qelem(
   uint64_t is_r_zero = bn_is_zero_mask4(r_q);
   uint64_t is_s_zero = bn_is_zero_mask4(s_q);
   uint64_t m = are_sk_nonce_valid & (~is_r_zero & ~is_s_zero);
-  bool res = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool res = m == 0xFFFFFFFFFFFFFFFFULL;
   return res;
 }
 
@@ -1572,7 +1548,7 @@ Hacl_P256_ecdsa_sign_p256_sha2(
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
   Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1605,7 +1581,7 @@ Hacl_P256_ecdsa_sign_p256_sha384(
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[48U] = { 0U };
   Hacl_Streaming_SHA2_hash_384(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1638,7 +1614,7 @@ Hacl_P256_ecdsa_sign_p256_sha512(
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[64U] = { 0U };
   Hacl_Streaming_SHA2_hash_512(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1680,8 +1656,8 @@ Hacl_P256_ecdsa_sign_p256_without_hash(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
-  memcpy(mHash, msg, (uint32_t)32U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(msg_len);
+  memcpy(mHash, msg, 32U * sizeof (uint8_t));
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1717,7 +1693,7 @@ Hacl_P256_ecdsa_verif_p256_sha2(
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
   Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1748,7 +1724,7 @@ Hacl_P256_ecdsa_verif_p256_sha384(
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[48U] = { 0U };
   Hacl_Streaming_SHA2_hash_384(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1779,7 +1755,7 @@ Hacl_P256_ecdsa_verif_p256_sha512(
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[64U] = { 0U };
   Hacl_Streaming_SHA2_hash_512(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1814,8 +1790,8 @@ Hacl_P256_ecdsa_verif_without_hash(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
-  memcpy(mHash, msg, (uint32_t)32U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(msg_len);
+  memcpy(mHash, msg, 32U * sizeof (uint8_t));
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1864,7 +1840,7 @@ bool Hacl_P256_validate_private_key(uint8_t *private_key)
   uint64_t bn_sk[4U] = { 0U };
   bn_from_bytes_be4(bn_sk, private_key);
   uint64_t res = bn_is_lt_order_and_gt_zero_mask4(bn_sk);
-  return res == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return res == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /*******************************************************************************
@@ -1893,11 +1869,11 @@ Convert a public key from uncompressed to its raw form.
 bool Hacl_P256_uncompressed_to_raw(uint8_t *pk, uint8_t *pk_raw)
 {
   uint8_t pk0 = pk[0U];
-  if (pk0 != (uint8_t)0x04U)
+  if (pk0 != 0x04U)
   {
     return false;
   }
-  memcpy(pk_raw, pk + (uint32_t)1U, (uint32_t)64U * sizeof (uint8_t));
+  memcpy(pk_raw, pk + 1U, 64U * sizeof (uint8_t));
   return true;
 }
 
@@ -1915,12 +1891,12 @@ bool Hacl_P256_compressed_to_raw(uint8_t *pk, uint8_t *pk_raw)
 {
   uint64_t xa[4U] = { 0U };
   uint64_t ya[4U] = { 0U };
-  uint8_t *pk_xb = pk + (uint32_t)1U;
+  uint8_t *pk_xb = pk + 1U;
   bool b = aff_point_decompress_vartime(xa, ya, pk);
   if (b)
   {
-    memcpy(pk_raw, pk_xb, (uint32_t)32U * sizeof (uint8_t));
-    bn_to_bytes_be4(pk_raw + (uint32_t)32U, ya);
+    memcpy(pk_raw, pk_xb, 32U * sizeof (uint8_t));
+    bn_to_bytes_be4(pk_raw + 32U, ya);
   }
   return b;
 }
@@ -1935,8 +1911,8 @@ Convert a public key from raw to its uncompressed form.
 */
 void Hacl_P256_raw_to_uncompressed(uint8_t *pk_raw, uint8_t *pk)
 {
-  pk[0U] = (uint8_t)0x04U;
-  memcpy(pk + (uint32_t)1U, pk_raw, (uint32_t)64U * sizeof (uint8_t));
+  pk[0U] = 0x04U;
+  memcpy(pk + 1U, pk_raw, 64U * sizeof (uint8_t));
 }
 
 /**
@@ -1950,12 +1926,12 @@ Convert a public key from raw to its compressed form.
 void Hacl_P256_raw_to_compressed(uint8_t *pk_raw, uint8_t *pk)
 {
   uint8_t *pk_x = pk_raw;
-  uint8_t *pk_y = pk_raw + (uint32_t)32U;
+  uint8_t *pk_y = pk_raw + 32U;
   uint64_t bn_f[4U] = { 0U };
   bn_from_bytes_be4(bn_f, pk_y);
-  uint64_t is_odd_f = bn_f[0U] & (uint64_t)1U;
-  pk[0U] = (uint8_t)is_odd_f + (uint8_t)0x02U;
-  memcpy(pk + (uint32_t)1U, pk_x, (uint32_t)32U * sizeof (uint8_t));
+  uint64_t is_odd_f = bn_f[0U] & 1ULL;
+  pk[0U] = (uint32_t)(uint8_t)is_odd_f + 0x02U;
+  memcpy(pk + 1U, pk_x, 32U * sizeof (uint8_t));
 }
 
 
diff --git a/src/Hacl_Poly1305_128.c b/src/Hacl_Poly1305_128.c
index f400fe82..ad1d8639 100644
--- a/src/Hacl_Poly1305_128.c
+++ b/src/Hacl_Poly1305_128.c
@@ -30,33 +30,28 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
   Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(b);
-  Lib_IntVector_Intrinsics_vec128
-  b2 = Lib_IntVector_Intrinsics_vec128_load64_le(b + (uint32_t)16U);
+  Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load64_le(b + 16U);
   Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
   Lib_IntVector_Intrinsics_vec128 hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
   Lib_IntVector_Intrinsics_vec128
   f00 =
     Lib_IntVector_Intrinsics_vec128_and(lo,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f10 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 40U);
   Lib_IntVector_Intrinsics_vec128 f02 = f00;
   Lib_IntVector_Intrinsics_vec128 f12 = f10;
   Lib_IntVector_Intrinsics_vec128 f22 = f20;
@@ -67,7 +62,7 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
   e[2U] = f22;
   e[3U] = f32;
   e[4U] = f42;
-  uint64_t b10 = (uint64_t)0x1000000U;
+  uint64_t b10 = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b10);
   Lib_IntVector_Intrinsics_vec128 f43 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec128_or(f43, mask);
@@ -81,16 +76,11 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
   Lib_IntVector_Intrinsics_vec128 e2 = e[2U];
   Lib_IntVector_Intrinsics_vec128 e3 = e[3U];
   Lib_IntVector_Intrinsics_vec128 e4 = e[4U];
-  Lib_IntVector_Intrinsics_vec128
-  f0 = Lib_IntVector_Intrinsics_vec128_insert64(acc0, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f1 = Lib_IntVector_Intrinsics_vec128_insert64(acc1, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f2 = Lib_IntVector_Intrinsics_vec128_insert64(acc2, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f3 = Lib_IntVector_Intrinsics_vec128_insert64(acc3, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f4 = Lib_IntVector_Intrinsics_vec128_insert64(acc4, (uint64_t)0U, (uint32_t)1U);
+  Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_insert64(acc0, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_insert64(acc1, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f2 = Lib_IntVector_Intrinsics_vec128_insert64(acc2, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f3 = Lib_IntVector_Intrinsics_vec128_insert64(acc3, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f4 = Lib_IntVector_Intrinsics_vec128_insert64(acc4, 0ULL, 1U);
   Lib_IntVector_Intrinsics_vec128 f01 = Lib_IntVector_Intrinsics_vec128_add64(f0, e0);
   Lib_IntVector_Intrinsics_vec128 f11 = Lib_IntVector_Intrinsics_vec128_add64(f1, e1);
   Lib_IntVector_Intrinsics_vec128 f21 = Lib_IntVector_Intrinsics_vec128_add64(f2, e2);
@@ -115,7 +105,7 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
 )
 {
   Lib_IntVector_Intrinsics_vec128 *r = p;
-  Lib_IntVector_Intrinsics_vec128 *r2 = p + (uint32_t)10U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = p + 10U;
   Lib_IntVector_Intrinsics_vec128 a0 = out[0U];
   Lib_IntVector_Intrinsics_vec128 a1 = out[1U];
   Lib_IntVector_Intrinsics_vec128 a2 = out[2U];
@@ -141,14 +131,10 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   r231 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r23, r13);
   Lib_IntVector_Intrinsics_vec128
   r241 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r24, r14);
-  Lib_IntVector_Intrinsics_vec128
-  r251 = Lib_IntVector_Intrinsics_vec128_smul64(r211, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec128
-  r252 = Lib_IntVector_Intrinsics_vec128_smul64(r221, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec128
-  r253 = Lib_IntVector_Intrinsics_vec128_smul64(r231, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec128
-  r254 = Lib_IntVector_Intrinsics_vec128_smul64(r241, (uint64_t)5U);
+  Lib_IntVector_Intrinsics_vec128 r251 = Lib_IntVector_Intrinsics_vec128_smul64(r211, 5ULL);
+  Lib_IntVector_Intrinsics_vec128 r252 = Lib_IntVector_Intrinsics_vec128_smul64(r221, 5ULL);
+  Lib_IntVector_Intrinsics_vec128 r253 = Lib_IntVector_Intrinsics_vec128_smul64(r231, 5ULL);
+  Lib_IntVector_Intrinsics_vec128 r254 = Lib_IntVector_Intrinsics_vec128_smul64(r241, 5ULL);
   Lib_IntVector_Intrinsics_vec128 a01 = Lib_IntVector_Intrinsics_vec128_mul64(r201, a0);
   Lib_IntVector_Intrinsics_vec128 a11 = Lib_IntVector_Intrinsics_vec128_mul64(r211, a0);
   Lib_IntVector_Intrinsics_vec128 a21 = Lib_IntVector_Intrinsics_vec128_mul64(r221, a0);
@@ -239,37 +225,28 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   Lib_IntVector_Intrinsics_vec128 t2 = a25;
   Lib_IntVector_Intrinsics_vec128 t3 = a35;
   Lib_IntVector_Intrinsics_vec128 t4 = a45;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -302,41 +279,36 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   Lib_IntVector_Intrinsics_vec128
   tmp0 =
     Lib_IntVector_Intrinsics_vec128_and(l,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(o11, c0);
   Lib_IntVector_Intrinsics_vec128
   tmp1 =
     Lib_IntVector_Intrinsics_vec128_and(l0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(o21, c1);
   Lib_IntVector_Intrinsics_vec128
   tmp2 =
     Lib_IntVector_Intrinsics_vec128_and(l1,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(o31, c2);
   Lib_IntVector_Intrinsics_vec128
   tmp3 =
     Lib_IntVector_Intrinsics_vec128_and(l2,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(o41, c3);
   Lib_IntVector_Intrinsics_vec128
   tmp4 =
     Lib_IntVector_Intrinsics_vec128_and(l3,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec128
   o00 =
     Lib_IntVector_Intrinsics_vec128_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec128_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec128 o1 = tmp1;
   Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
   Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
@@ -351,7 +323,7 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
 void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key)
 {
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   uint8_t *kr = key;
   acc[0U] = Lib_IntVector_Intrinsics_vec128_zero;
   acc[1U] = Lib_IntVector_Intrinsics_vec128_zero;
@@ -360,41 +332,38 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   acc[4U] = Lib_IntVector_Intrinsics_vec128_zero;
   uint64_t u0 = load64_le(kr);
   uint64_t lo = u0;
-  uint64_t u = load64_le(kr + (uint32_t)8U);
+  uint64_t u = load64_le(kr + 8U);
   uint64_t hi = u;
-  uint64_t mask0 = (uint64_t)0x0ffffffc0fffffffU;
-  uint64_t mask1 = (uint64_t)0x0ffffffc0ffffffcU;
+  uint64_t mask0 = 0x0ffffffc0fffffffULL;
+  uint64_t mask1 = 0x0ffffffc0ffffffcULL;
   uint64_t lo1 = lo & mask0;
   uint64_t hi1 = hi & mask1;
   Lib_IntVector_Intrinsics_vec128 *r = pre;
-  Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec128 *rn = pre + (uint32_t)10U;
-  Lib_IntVector_Intrinsics_vec128 *rn_5 = pre + (uint32_t)15U;
+  Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
+  Lib_IntVector_Intrinsics_vec128 *rn = pre + 10U;
+  Lib_IntVector_Intrinsics_vec128 *rn_5 = pre + 15U;
   Lib_IntVector_Intrinsics_vec128 r_vec0 = Lib_IntVector_Intrinsics_vec128_load64(lo1);
   Lib_IntVector_Intrinsics_vec128 r_vec1 = Lib_IntVector_Intrinsics_vec128_load64(hi1);
   Lib_IntVector_Intrinsics_vec128
   f00 =
     Lib_IntVector_Intrinsics_vec128_and(r_vec0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f15 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(r_vec1,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, (uint32_t)40U);
+  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, 40U);
   Lib_IntVector_Intrinsics_vec128 f0 = f00;
   Lib_IntVector_Intrinsics_vec128 f1 = f15;
   Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -410,11 +379,11 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   Lib_IntVector_Intrinsics_vec128 f220 = r[2U];
   Lib_IntVector_Intrinsics_vec128 f230 = r[3U];
   Lib_IntVector_Intrinsics_vec128 f240 = r[4U];
-  r5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f200, (uint64_t)5U);
-  r5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f210, (uint64_t)5U);
-  r5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f220, (uint64_t)5U);
-  r5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f230, (uint64_t)5U);
-  r5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f240, (uint64_t)5U);
+  r5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f200, 5ULL);
+  r5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f210, 5ULL);
+  r5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f220, 5ULL);
+  r5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f230, 5ULL);
+  r5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f240, 5ULL);
   Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -511,37 +480,28 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   Lib_IntVector_Intrinsics_vec128 t2 = a24;
   Lib_IntVector_Intrinsics_vec128 t3 = a34;
   Lib_IntVector_Intrinsics_vec128 t4 = a44;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -559,47 +519,43 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   Lib_IntVector_Intrinsics_vec128 f22 = rn[2U];
   Lib_IntVector_Intrinsics_vec128 f23 = rn[3U];
   Lib_IntVector_Intrinsics_vec128 f24 = rn[4U];
-  rn_5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f201, (uint64_t)5U);
-  rn_5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f21, (uint64_t)5U);
-  rn_5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f22, (uint64_t)5U);
-  rn_5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f23, (uint64_t)5U);
-  rn_5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f24, (uint64_t)5U);
+  rn_5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f201, 5ULL);
+  rn_5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f21, 5ULL);
+  rn_5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f22, 5ULL);
+  rn_5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f23, 5ULL);
+  rn_5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f24, 5ULL);
 }
 
 void Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *text)
 {
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
   uint64_t u0 = load64_le(text);
   uint64_t lo = u0;
-  uint64_t u = load64_le(text + (uint32_t)8U);
+  uint64_t u = load64_le(text + 8U);
   uint64_t hi = u;
   Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
   Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
   Lib_IntVector_Intrinsics_vec128
   f010 =
     Lib_IntVector_Intrinsics_vec128_and(f0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f110 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
   Lib_IntVector_Intrinsics_vec128 f01 = f010;
   Lib_IntVector_Intrinsics_vec128 f111 = f110;
   Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -610,12 +566,12 @@ void Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, ui
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
   Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
   Lib_IntVector_Intrinsics_vec128 *r = pre;
-  Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
   Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -730,37 +686,28 @@ void Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, ui
   Lib_IntVector_Intrinsics_vec128 t2 = a26;
   Lib_IntVector_Intrinsics_vec128 t3 = a36;
   Lib_IntVector_Intrinsics_vec128 t4 = a46;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -782,52 +729,47 @@ Hacl_Poly1305_128_poly1305_update(
   uint8_t *text
 )
 {
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  uint32_t sz_block = (uint32_t)32U;
+  uint32_t sz_block = 32U;
   uint32_t len0 = len / sz_block * sz_block;
   uint8_t *t0 = text;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)32U;
+    uint32_t bs = 32U;
     uint8_t *text0 = t0;
     Hacl_Impl_Poly1305_Field32xN_128_load_acc2(acc, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t0 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
       Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(block);
-      Lib_IntVector_Intrinsics_vec128
-      b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + (uint32_t)16U);
+      Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + 16U);
       Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       f00 =
         Lib_IntVector_Intrinsics_vec128_and(lo,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f15 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)26U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 26U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f25 =
-        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)52U),
+        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 52U),
           Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi,
-              Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-            (uint32_t)12U));
+              Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+            12U));
       Lib_IntVector_Intrinsics_vec128
       f30 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi,
-            (uint32_t)14U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-      Lib_IntVector_Intrinsics_vec128
-      f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U);
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 14U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+      Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 40U);
       Lib_IntVector_Intrinsics_vec128 f0 = f00;
       Lib_IntVector_Intrinsics_vec128 f1 = f15;
       Lib_IntVector_Intrinsics_vec128 f2 = f25;
@@ -838,12 +780,12 @@ Hacl_Poly1305_128_poly1305_update(
       e[2U] = f2;
       e[3U] = f3;
       e[4U] = f41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
       Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec128 *rn = pre + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec128 *rn5 = pre + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec128 *rn = pre + 10U;
+      Lib_IntVector_Intrinsics_vec128 *rn5 = pre + 15U;
       Lib_IntVector_Intrinsics_vec128 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec128 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec128 r2 = rn[2U];
@@ -948,37 +890,28 @@ Hacl_Poly1305_128_poly1305_update(
       Lib_IntVector_Intrinsics_vec128 t2 = a24;
       Lib_IntVector_Intrinsics_vec128 t3 = a34;
       Lib_IntVector_Intrinsics_vec128 t4 = a44;
-      Lib_IntVector_Intrinsics_vec128
-      mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec128
-      z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec128
-      z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
       Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec128
-      z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec128
-      z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec128 o00 = x02;
@@ -1016,41 +949,37 @@ Hacl_Poly1305_128_poly1305_update(
   }
   uint32_t len1 = len - len0;
   uint8_t *t1 = text + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t1 + i * (uint32_t)16U;
+    uint8_t *block = t1 + i * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -1061,12 +990,12 @@ Hacl_Poly1305_128_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
     Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
     Lib_IntVector_Intrinsics_vec128 *r = pre;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -1181,37 +1110,28 @@ Hacl_Poly1305_128_poly1305_update(
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -1225,41 +1145,37 @@ Hacl_Poly1305_128_poly1305_update(
     acc[3U] = o3;
     acc[4U] = o4;
   }
-  if (rem > (uint32_t)0U)
+  if (rem > 0U)
   {
-    uint8_t *last = t1 + nb * (uint32_t)16U;
+    uint8_t *last = t1 + nb * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -1270,12 +1186,12 @@ Hacl_Poly1305_128_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem * 8U % 26U;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
-    Lib_IntVector_Intrinsics_vec128 fi = e[rem * (uint32_t)8U / (uint32_t)26U];
-    e[rem * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec128 fi = e[rem * 8U / 26U];
+    e[rem * 8U / 26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
     Lib_IntVector_Intrinsics_vec128 *r = pre;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -1390,37 +1306,28 @@ Hacl_Poly1305_128_poly1305_update(
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -1445,7 +1352,7 @@ Hacl_Poly1305_128_poly1305_finish(
 )
 {
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  uint8_t *ks = key + (uint32_t)16U;
+  uint8_t *ks = key + 16U;
   Lib_IntVector_Intrinsics_vec128 f0 = acc[0U];
   Lib_IntVector_Intrinsics_vec128 f13 = acc[1U];
   Lib_IntVector_Intrinsics_vec128 f23 = acc[2U];
@@ -1456,41 +1363,36 @@ Hacl_Poly1305_128_poly1305_finish(
   Lib_IntVector_Intrinsics_vec128
   tmp00 =
     Lib_IntVector_Intrinsics_vec128_and(l0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(f13, c00);
   Lib_IntVector_Intrinsics_vec128
   tmp10 =
     Lib_IntVector_Intrinsics_vec128_and(l1,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(f23, c10);
   Lib_IntVector_Intrinsics_vec128
   tmp20 =
     Lib_IntVector_Intrinsics_vec128_and(l2,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(f33, c20);
   Lib_IntVector_Intrinsics_vec128
   tmp30 =
     Lib_IntVector_Intrinsics_vec128_and(l3,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec128 l4 = Lib_IntVector_Intrinsics_vec128_add64(f40, c30);
   Lib_IntVector_Intrinsics_vec128
   tmp40 =
     Lib_IntVector_Intrinsics_vec128_and(l4,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, 26U);
   Lib_IntVector_Intrinsics_vec128
   f010 =
     Lib_IntVector_Intrinsics_vec128_add64(tmp00,
-      Lib_IntVector_Intrinsics_vec128_smul64(c40, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec128_smul64(c40, 5ULL));
   Lib_IntVector_Intrinsics_vec128 f110 = tmp10;
   Lib_IntVector_Intrinsics_vec128 f210 = tmp20;
   Lib_IntVector_Intrinsics_vec128 f310 = tmp30;
@@ -1500,49 +1402,42 @@ Hacl_Poly1305_128_poly1305_finish(
   Lib_IntVector_Intrinsics_vec128
   tmp0 =
     Lib_IntVector_Intrinsics_vec128_and(l,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec128 l5 = Lib_IntVector_Intrinsics_vec128_add64(f110, c0);
   Lib_IntVector_Intrinsics_vec128
   tmp1 =
     Lib_IntVector_Intrinsics_vec128_and(l5,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, 26U);
   Lib_IntVector_Intrinsics_vec128 l6 = Lib_IntVector_Intrinsics_vec128_add64(f210, c1);
   Lib_IntVector_Intrinsics_vec128
   tmp2 =
     Lib_IntVector_Intrinsics_vec128_and(l6,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, 26U);
   Lib_IntVector_Intrinsics_vec128 l7 = Lib_IntVector_Intrinsics_vec128_add64(f310, c2);
   Lib_IntVector_Intrinsics_vec128
   tmp3 =
     Lib_IntVector_Intrinsics_vec128_and(l7,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, 26U);
   Lib_IntVector_Intrinsics_vec128 l8 = Lib_IntVector_Intrinsics_vec128_add64(f410, c3);
   Lib_IntVector_Intrinsics_vec128
   tmp4 =
     Lib_IntVector_Intrinsics_vec128_and(l8,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l8, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l8, 26U);
   Lib_IntVector_Intrinsics_vec128
   f02 =
     Lib_IntVector_Intrinsics_vec128_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec128_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec128 f12 = tmp1;
   Lib_IntVector_Intrinsics_vec128 f22 = tmp2;
   Lib_IntVector_Intrinsics_vec128 f32 = tmp3;
   Lib_IntVector_Intrinsics_vec128 f42 = tmp4;
-  Lib_IntVector_Intrinsics_vec128
-  mh = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  ml = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffffbU);
+  Lib_IntVector_Intrinsics_vec128 mh = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 ml = Lib_IntVector_Intrinsics_vec128_load64(0x3fffffbULL);
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_eq64(f42, mh);
   Lib_IntVector_Intrinsics_vec128
   mask1 =
@@ -1582,29 +1477,29 @@ Hacl_Poly1305_128_poly1305_finish(
   Lib_IntVector_Intrinsics_vec128 f2 = acc[2U];
   Lib_IntVector_Intrinsics_vec128 f3 = acc[3U];
   Lib_IntVector_Intrinsics_vec128 f4 = acc[4U];
-  uint64_t f01 = Lib_IntVector_Intrinsics_vec128_extract64(f00, (uint32_t)0U);
-  uint64_t f112 = Lib_IntVector_Intrinsics_vec128_extract64(f1, (uint32_t)0U);
-  uint64_t f212 = Lib_IntVector_Intrinsics_vec128_extract64(f2, (uint32_t)0U);
-  uint64_t f312 = Lib_IntVector_Intrinsics_vec128_extract64(f3, (uint32_t)0U);
-  uint64_t f41 = Lib_IntVector_Intrinsics_vec128_extract64(f4, (uint32_t)0U);
-  uint64_t lo = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U;
-  uint64_t hi = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U;
+  uint64_t f01 = Lib_IntVector_Intrinsics_vec128_extract64(f00, 0U);
+  uint64_t f112 = Lib_IntVector_Intrinsics_vec128_extract64(f1, 0U);
+  uint64_t f212 = Lib_IntVector_Intrinsics_vec128_extract64(f2, 0U);
+  uint64_t f312 = Lib_IntVector_Intrinsics_vec128_extract64(f3, 0U);
+  uint64_t f41 = Lib_IntVector_Intrinsics_vec128_extract64(f4, 0U);
+  uint64_t lo = (f01 | f112 << 26U) | f212 << 52U;
+  uint64_t hi = (f212 >> 12U | f312 << 14U) | f41 << 40U;
   uint64_t f10 = lo;
   uint64_t f11 = hi;
   uint64_t u0 = load64_le(ks);
   uint64_t lo0 = u0;
-  uint64_t u = load64_le(ks + (uint32_t)8U);
+  uint64_t u = load64_le(ks + 8U);
   uint64_t hi0 = u;
   uint64_t f20 = lo0;
   uint64_t f21 = hi0;
   uint64_t r0 = f10 + f20;
   uint64_t r1 = f11 + f21;
-  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U;
+  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> 63U;
   uint64_t r11 = r1 + c;
   uint64_t f30 = r0;
   uint64_t f31 = r11;
   store64_le(tag, f30);
-  store64_le(tag + (uint32_t)8U, f31);
+  store64_le(tag + 8U, f31);
 }
 
 void Hacl_Poly1305_128_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
diff --git a/src/Hacl_Poly1305_256.c b/src/Hacl_Poly1305_256.c
index db28cdc7..a60bc238 100644
--- a/src/Hacl_Poly1305_256.c
+++ b/src/Hacl_Poly1305_256.c
@@ -30,32 +30,24 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
   Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(b);
-  Lib_IntVector_Intrinsics_vec256
-  hi = Lib_IntVector_Intrinsics_vec256_load64_le(b + (uint32_t)32U);
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
+  Lib_IntVector_Intrinsics_vec256 hi = Lib_IntVector_Intrinsics_vec256_load64_le(b + 32U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
   Lib_IntVector_Intrinsics_vec256 m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
   Lib_IntVector_Intrinsics_vec256
   m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
-  Lib_IntVector_Intrinsics_vec256
-  m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U);
-  Lib_IntVector_Intrinsics_vec256
-  m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U);
+  Lib_IntVector_Intrinsics_vec256 m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, 48U);
+  Lib_IntVector_Intrinsics_vec256 m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, 48U);
   Lib_IntVector_Intrinsics_vec256 m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1);
   Lib_IntVector_Intrinsics_vec256 t0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1);
   Lib_IntVector_Intrinsics_vec256 t3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
-  Lib_IntVector_Intrinsics_vec256
-  t2 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)4U);
+  Lib_IntVector_Intrinsics_vec256 t2 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 4U);
   Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t2, mask26);
-  Lib_IntVector_Intrinsics_vec256
-  t1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 t1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
   Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t1, mask26);
   Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
-  Lib_IntVector_Intrinsics_vec256
-  t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)30U);
+  Lib_IntVector_Intrinsics_vec256 t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 30U);
   Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask26);
-  Lib_IntVector_Intrinsics_vec256
-  o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
+  Lib_IntVector_Intrinsics_vec256 o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, 40U);
   Lib_IntVector_Intrinsics_vec256 o0 = o5;
   Lib_IntVector_Intrinsics_vec256 o1 = o10;
   Lib_IntVector_Intrinsics_vec256 o2 = o20;
@@ -66,7 +58,7 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
   e[2U] = o2;
   e[3U] = o3;
   e[4U] = o4;
-  uint64_t b1 = (uint64_t)0x1000000U;
+  uint64_t b1 = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b1);
   Lib_IntVector_Intrinsics_vec256 f40 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec256_or(f40, mask);
@@ -88,28 +80,28 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
   Lib_IntVector_Intrinsics_vec256
   r01 =
     Lib_IntVector_Intrinsics_vec256_insert64(r0,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc0, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc0, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r11 =
     Lib_IntVector_Intrinsics_vec256_insert64(r1,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc1, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc1, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r21 =
     Lib_IntVector_Intrinsics_vec256_insert64(r2,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc2, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc2, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r31 =
     Lib_IntVector_Intrinsics_vec256_insert64(r3,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc3, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc3, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r41 =
     Lib_IntVector_Intrinsics_vec256_insert64(r4,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc4, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc4, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_add64(r01, e0);
   Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_add64(r11, e1);
   Lib_IntVector_Intrinsics_vec256 f2 = Lib_IntVector_Intrinsics_vec256_add64(r21, e2);
@@ -134,8 +126,8 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
 )
 {
   Lib_IntVector_Intrinsics_vec256 *r = p;
-  Lib_IntVector_Intrinsics_vec256 *r_5 = p + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec256 *r4 = p + (uint32_t)10U;
+  Lib_IntVector_Intrinsics_vec256 *r_5 = p + 5U;
+  Lib_IntVector_Intrinsics_vec256 *r4 = p + 10U;
   Lib_IntVector_Intrinsics_vec256 a0 = out[0U];
   Lib_IntVector_Intrinsics_vec256 a1 = out[1U];
   Lib_IntVector_Intrinsics_vec256 a2 = out[2U];
@@ -245,37 +237,30 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 t20 = a250;
   Lib_IntVector_Intrinsics_vec256 t30 = a350;
   Lib_IntVector_Intrinsics_vec256 t40 = a450;
-  Lib_IntVector_Intrinsics_vec256
-  mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, 26U);
+  Lib_IntVector_Intrinsics_vec256 z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 26U);
   Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask260);
   Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask260);
   Lib_IntVector_Intrinsics_vec256 x10 = Lib_IntVector_Intrinsics_vec256_add64(t10, z00);
   Lib_IntVector_Intrinsics_vec256 x40 = Lib_IntVector_Intrinsics_vec256_add64(t40, z10);
-  Lib_IntVector_Intrinsics_vec256
-  z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, 26U);
+  Lib_IntVector_Intrinsics_vec256 z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, 26U);
+  Lib_IntVector_Intrinsics_vec256 t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z110, t5);
   Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask260);
   Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask260);
   Lib_IntVector_Intrinsics_vec256 x20 = Lib_IntVector_Intrinsics_vec256_add64(t20, z010);
   Lib_IntVector_Intrinsics_vec256 x010 = Lib_IntVector_Intrinsics_vec256_add64(x00, z12);
+  Lib_IntVector_Intrinsics_vec256 z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, 26U);
   Lib_IntVector_Intrinsics_vec256
-  z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, (uint32_t)26U);
+  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, 26U);
   Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask260);
   Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask260);
   Lib_IntVector_Intrinsics_vec256 x310 = Lib_IntVector_Intrinsics_vec256_add64(x30, z020);
   Lib_IntVector_Intrinsics_vec256 x120 = Lib_IntVector_Intrinsics_vec256_add64(x110, z130);
   Lib_IntVector_Intrinsics_vec256
-  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, (uint32_t)26U);
+  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, 26U);
   Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask260);
   Lib_IntVector_Intrinsics_vec256 x420 = Lib_IntVector_Intrinsics_vec256_add64(x410, z030);
   Lib_IntVector_Intrinsics_vec256 r20 = x020;
@@ -373,37 +358,30 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 t21 = a251;
   Lib_IntVector_Intrinsics_vec256 t31 = a351;
   Lib_IntVector_Intrinsics_vec256 t41 = a451;
-  Lib_IntVector_Intrinsics_vec256
-  mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z04 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z14 = Lib_IntVector_Intrinsics_vec256_shift_right64(t31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask261 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z04 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+  Lib_IntVector_Intrinsics_vec256 z14 = Lib_IntVector_Intrinsics_vec256_shift_right64(t31, 26U);
   Lib_IntVector_Intrinsics_vec256 x03 = Lib_IntVector_Intrinsics_vec256_and(t01, mask261);
   Lib_IntVector_Intrinsics_vec256 x33 = Lib_IntVector_Intrinsics_vec256_and(t31, mask261);
   Lib_IntVector_Intrinsics_vec256 x13 = Lib_IntVector_Intrinsics_vec256_add64(t11, z04);
   Lib_IntVector_Intrinsics_vec256 x43 = Lib_IntVector_Intrinsics_vec256_add64(t41, z14);
-  Lib_IntVector_Intrinsics_vec256
-  z011 = Lib_IntVector_Intrinsics_vec256_shift_right64(x13, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z111 = Lib_IntVector_Intrinsics_vec256_shift_right64(x43, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t6 = Lib_IntVector_Intrinsics_vec256_shift_left64(z111, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z011 = Lib_IntVector_Intrinsics_vec256_shift_right64(x13, 26U);
+  Lib_IntVector_Intrinsics_vec256 z111 = Lib_IntVector_Intrinsics_vec256_shift_right64(x43, 26U);
+  Lib_IntVector_Intrinsics_vec256 t6 = Lib_IntVector_Intrinsics_vec256_shift_left64(z111, 2U);
   Lib_IntVector_Intrinsics_vec256 z120 = Lib_IntVector_Intrinsics_vec256_add64(z111, t6);
   Lib_IntVector_Intrinsics_vec256 x111 = Lib_IntVector_Intrinsics_vec256_and(x13, mask261);
   Lib_IntVector_Intrinsics_vec256 x411 = Lib_IntVector_Intrinsics_vec256_and(x43, mask261);
   Lib_IntVector_Intrinsics_vec256 x22 = Lib_IntVector_Intrinsics_vec256_add64(t21, z011);
   Lib_IntVector_Intrinsics_vec256 x011 = Lib_IntVector_Intrinsics_vec256_add64(x03, z120);
+  Lib_IntVector_Intrinsics_vec256 z021 = Lib_IntVector_Intrinsics_vec256_shift_right64(x22, 26U);
   Lib_IntVector_Intrinsics_vec256
-  z021 = Lib_IntVector_Intrinsics_vec256_shift_right64(x22, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z131 = Lib_IntVector_Intrinsics_vec256_shift_right64(x011, (uint32_t)26U);
+  z131 = Lib_IntVector_Intrinsics_vec256_shift_right64(x011, 26U);
   Lib_IntVector_Intrinsics_vec256 x211 = Lib_IntVector_Intrinsics_vec256_and(x22, mask261);
   Lib_IntVector_Intrinsics_vec256 x021 = Lib_IntVector_Intrinsics_vec256_and(x011, mask261);
   Lib_IntVector_Intrinsics_vec256 x311 = Lib_IntVector_Intrinsics_vec256_add64(x33, z021);
   Lib_IntVector_Intrinsics_vec256 x121 = Lib_IntVector_Intrinsics_vec256_add64(x111, z131);
   Lib_IntVector_Intrinsics_vec256
-  z031 = Lib_IntVector_Intrinsics_vec256_shift_right64(x311, (uint32_t)26U);
+  z031 = Lib_IntVector_Intrinsics_vec256_shift_right64(x311, 26U);
   Lib_IntVector_Intrinsics_vec256 x321 = Lib_IntVector_Intrinsics_vec256_and(x311, mask261);
   Lib_IntVector_Intrinsics_vec256 x421 = Lib_IntVector_Intrinsics_vec256_add64(x411, z031);
   Lib_IntVector_Intrinsics_vec256 r30 = x021;
@@ -441,14 +419,10 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   v34344 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r44, r34);
   Lib_IntVector_Intrinsics_vec256
   r12344 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v34344, v12124);
-  Lib_IntVector_Intrinsics_vec256
-  r123451 = Lib_IntVector_Intrinsics_vec256_smul64(r12341, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec256
-  r123452 = Lib_IntVector_Intrinsics_vec256_smul64(r12342, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec256
-  r123453 = Lib_IntVector_Intrinsics_vec256_smul64(r12343, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec256
-  r123454 = Lib_IntVector_Intrinsics_vec256_smul64(r12344, (uint64_t)5U);
+  Lib_IntVector_Intrinsics_vec256 r123451 = Lib_IntVector_Intrinsics_vec256_smul64(r12341, 5ULL);
+  Lib_IntVector_Intrinsics_vec256 r123452 = Lib_IntVector_Intrinsics_vec256_smul64(r12342, 5ULL);
+  Lib_IntVector_Intrinsics_vec256 r123453 = Lib_IntVector_Intrinsics_vec256_smul64(r12343, 5ULL);
+  Lib_IntVector_Intrinsics_vec256 r123454 = Lib_IntVector_Intrinsics_vec256_smul64(r12344, 5ULL);
   Lib_IntVector_Intrinsics_vec256 a01 = Lib_IntVector_Intrinsics_vec256_mul64(r12340, a0);
   Lib_IntVector_Intrinsics_vec256 a11 = Lib_IntVector_Intrinsics_vec256_mul64(r12341, a0);
   Lib_IntVector_Intrinsics_vec256 a21 = Lib_IntVector_Intrinsics_vec256_mul64(r12342, a0);
@@ -539,37 +513,28 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 t2 = a25;
   Lib_IntVector_Intrinsics_vec256 t3 = a35;
   Lib_IntVector_Intrinsics_vec256 t4 = a45;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z121 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z121);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -612,41 +577,36 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256
   tmp0 =
     Lib_IntVector_Intrinsics_vec256_and(l,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec256 l0 = Lib_IntVector_Intrinsics_vec256_add64(v21, c0);
   Lib_IntVector_Intrinsics_vec256
   tmp1 =
     Lib_IntVector_Intrinsics_vec256_and(l0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec256 l1 = Lib_IntVector_Intrinsics_vec256_add64(v22, c1);
   Lib_IntVector_Intrinsics_vec256
   tmp2 =
     Lib_IntVector_Intrinsics_vec256_and(l1,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec256 l2 = Lib_IntVector_Intrinsics_vec256_add64(v23, c2);
   Lib_IntVector_Intrinsics_vec256
   tmp3 =
     Lib_IntVector_Intrinsics_vec256_and(l2,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec256 l3 = Lib_IntVector_Intrinsics_vec256_add64(v24, c3);
   Lib_IntVector_Intrinsics_vec256
   tmp4 =
     Lib_IntVector_Intrinsics_vec256_and(l3,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec256
   o00 =
     Lib_IntVector_Intrinsics_vec256_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec256_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec256_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec256 o1 = tmp1;
   Lib_IntVector_Intrinsics_vec256 o2 = tmp2;
   Lib_IntVector_Intrinsics_vec256 o3 = tmp3;
@@ -661,7 +621,7 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
 void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key)
 {
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   uint8_t *kr = key;
   acc[0U] = Lib_IntVector_Intrinsics_vec256_zero;
   acc[1U] = Lib_IntVector_Intrinsics_vec256_zero;
@@ -670,41 +630,38 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   acc[4U] = Lib_IntVector_Intrinsics_vec256_zero;
   uint64_t u0 = load64_le(kr);
   uint64_t lo = u0;
-  uint64_t u = load64_le(kr + (uint32_t)8U);
+  uint64_t u = load64_le(kr + 8U);
   uint64_t hi = u;
-  uint64_t mask0 = (uint64_t)0x0ffffffc0fffffffU;
-  uint64_t mask1 = (uint64_t)0x0ffffffc0ffffffcU;
+  uint64_t mask0 = 0x0ffffffc0fffffffULL;
+  uint64_t mask1 = 0x0ffffffc0ffffffcULL;
   uint64_t lo1 = lo & mask0;
   uint64_t hi1 = hi & mask1;
   Lib_IntVector_Intrinsics_vec256 *r = pre;
-  Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec256 *rn = pre + (uint32_t)10U;
-  Lib_IntVector_Intrinsics_vec256 *rn_5 = pre + (uint32_t)15U;
+  Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
+  Lib_IntVector_Intrinsics_vec256 *rn = pre + 10U;
+  Lib_IntVector_Intrinsics_vec256 *rn_5 = pre + 15U;
   Lib_IntVector_Intrinsics_vec256 r_vec0 = Lib_IntVector_Intrinsics_vec256_load64(lo1);
   Lib_IntVector_Intrinsics_vec256 r_vec1 = Lib_IntVector_Intrinsics_vec256_load64(hi1);
   Lib_IntVector_Intrinsics_vec256
   f00 =
     Lib_IntVector_Intrinsics_vec256_and(r_vec0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f15 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0, 26U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f20 =
-    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0, 52U),
       Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(r_vec1,
-          Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec256
   f30 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, 14U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
-  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, (uint32_t)40U);
+  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, 40U);
   Lib_IntVector_Intrinsics_vec256 f0 = f00;
   Lib_IntVector_Intrinsics_vec256 f1 = f15;
   Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -720,11 +677,11 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 f220 = r[2U];
   Lib_IntVector_Intrinsics_vec256 f230 = r[3U];
   Lib_IntVector_Intrinsics_vec256 f240 = r[4U];
-  r5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f200, (uint64_t)5U);
-  r5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f210, (uint64_t)5U);
-  r5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f220, (uint64_t)5U);
-  r5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f230, (uint64_t)5U);
-  r5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f240, (uint64_t)5U);
+  r5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f200, 5ULL);
+  r5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f210, 5ULL);
+  r5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f220, 5ULL);
+  r5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f230, 5ULL);
+  r5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f240, 5ULL);
   Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec256 r10 = r[1U];
   Lib_IntVector_Intrinsics_vec256 r20 = r[2U];
@@ -829,37 +786,30 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 t20 = a240;
   Lib_IntVector_Intrinsics_vec256 t30 = a340;
   Lib_IntVector_Intrinsics_vec256 t40 = a440;
-  Lib_IntVector_Intrinsics_vec256
-  mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, 26U);
+  Lib_IntVector_Intrinsics_vec256 z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 26U);
   Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask260);
   Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask260);
   Lib_IntVector_Intrinsics_vec256 x10 = Lib_IntVector_Intrinsics_vec256_add64(t10, z00);
   Lib_IntVector_Intrinsics_vec256 x40 = Lib_IntVector_Intrinsics_vec256_add64(t40, z10);
-  Lib_IntVector_Intrinsics_vec256
-  z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, 26U);
+  Lib_IntVector_Intrinsics_vec256 z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, 26U);
+  Lib_IntVector_Intrinsics_vec256 t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z110, t5);
   Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask260);
   Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask260);
   Lib_IntVector_Intrinsics_vec256 x20 = Lib_IntVector_Intrinsics_vec256_add64(t20, z010);
   Lib_IntVector_Intrinsics_vec256 x010 = Lib_IntVector_Intrinsics_vec256_add64(x00, z12);
+  Lib_IntVector_Intrinsics_vec256 z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, 26U);
   Lib_IntVector_Intrinsics_vec256
-  z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, (uint32_t)26U);
+  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, 26U);
   Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask260);
   Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask260);
   Lib_IntVector_Intrinsics_vec256 x310 = Lib_IntVector_Intrinsics_vec256_add64(x30, z020);
   Lib_IntVector_Intrinsics_vec256 x120 = Lib_IntVector_Intrinsics_vec256_add64(x110, z130);
   Lib_IntVector_Intrinsics_vec256
-  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, (uint32_t)26U);
+  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, 26U);
   Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask260);
   Lib_IntVector_Intrinsics_vec256 x420 = Lib_IntVector_Intrinsics_vec256_add64(x410, z030);
   Lib_IntVector_Intrinsics_vec256 o00 = x020;
@@ -877,11 +827,11 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 f221 = rn[2U];
   Lib_IntVector_Intrinsics_vec256 f231 = rn[3U];
   Lib_IntVector_Intrinsics_vec256 f241 = rn[4U];
-  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f201, (uint64_t)5U);
-  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f211, (uint64_t)5U);
-  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f221, (uint64_t)5U);
-  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f231, (uint64_t)5U);
-  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f241, (uint64_t)5U);
+  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f201, 5ULL);
+  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f211, 5ULL);
+  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f221, 5ULL);
+  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f231, 5ULL);
+  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f241, 5ULL);
   Lib_IntVector_Intrinsics_vec256 r00 = rn[0U];
   Lib_IntVector_Intrinsics_vec256 r1 = rn[1U];
   Lib_IntVector_Intrinsics_vec256 r2 = rn[2U];
@@ -980,37 +930,28 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 t2 = a24;
   Lib_IntVector_Intrinsics_vec256 t3 = a34;
   Lib_IntVector_Intrinsics_vec256 t4 = a44;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z120 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z120);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1028,47 +969,43 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 f22 = rn[2U];
   Lib_IntVector_Intrinsics_vec256 f23 = rn[3U];
   Lib_IntVector_Intrinsics_vec256 f24 = rn[4U];
-  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f202, (uint64_t)5U);
-  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f21, (uint64_t)5U);
-  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f22, (uint64_t)5U);
-  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f23, (uint64_t)5U);
-  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f24, (uint64_t)5U);
+  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f202, 5ULL);
+  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f21, 5ULL);
+  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f22, 5ULL);
+  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f23, 5ULL);
+  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f24, 5ULL);
 }
 
 void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *text)
 {
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
   uint64_t u0 = load64_le(text);
   uint64_t lo = u0;
-  uint64_t u = load64_le(text + (uint32_t)8U);
+  uint64_t u = load64_le(text + 8U);
   uint64_t hi = u;
   Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
   Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
   Lib_IntVector_Intrinsics_vec256
   f010 =
     Lib_IntVector_Intrinsics_vec256_and(f0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f110 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f20 =
-    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
       Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-          Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec256
   f30 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
   Lib_IntVector_Intrinsics_vec256 f01 = f010;
   Lib_IntVector_Intrinsics_vec256 f111 = f110;
   Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -1079,12 +1016,12 @@ void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, ui
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
   Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
   Lib_IntVector_Intrinsics_vec256 *r = pre;
-  Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
   Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1199,37 +1136,28 @@ void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, ui
   Lib_IntVector_Intrinsics_vec256 t2 = a26;
   Lib_IntVector_Intrinsics_vec256 t3 = a36;
   Lib_IntVector_Intrinsics_vec256 t4 = a46;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1251,54 +1179,48 @@ Hacl_Poly1305_256_poly1305_update(
   uint8_t *text
 )
 {
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  uint32_t sz_block = (uint32_t)64U;
+  uint32_t sz_block = 64U;
   uint32_t len0 = len / sz_block * sz_block;
   uint8_t *t0 = text;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)64U;
+    uint32_t bs = 64U;
     uint8_t *text0 = t0;
     Hacl_Impl_Poly1305_Field32xN_256_load_acc4(acc, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t0 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
       Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block);
+      Lib_IntVector_Intrinsics_vec256 hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + 32U);
       Lib_IntVector_Intrinsics_vec256
-      hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U);
-      Lib_IntVector_Intrinsics_vec256
-      mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
+      mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
       Lib_IntVector_Intrinsics_vec256
       m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
       Lib_IntVector_Intrinsics_vec256
       m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
-      Lib_IntVector_Intrinsics_vec256
-      m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U);
-      Lib_IntVector_Intrinsics_vec256
-      m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U);
+      Lib_IntVector_Intrinsics_vec256 m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, 48U);
+      Lib_IntVector_Intrinsics_vec256 m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, 48U);
       Lib_IntVector_Intrinsics_vec256
       m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t010 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t30 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
-      Lib_IntVector_Intrinsics_vec256
-      t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)4U);
+      Lib_IntVector_Intrinsics_vec256 t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 4U);
       Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask260);
       Lib_IntVector_Intrinsics_vec256
-      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, (uint32_t)26U);
+      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, 26U);
       Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t10, mask260);
       Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)30U);
+      Lib_IntVector_Intrinsics_vec256 t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 30U);
       Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256 o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, 40U);
       Lib_IntVector_Intrinsics_vec256 o00 = o5;
       Lib_IntVector_Intrinsics_vec256 o11 = o10;
       Lib_IntVector_Intrinsics_vec256 o21 = o20;
@@ -1309,12 +1231,12 @@ Hacl_Poly1305_256_poly1305_update(
       e[2U] = o21;
       e[3U] = o31;
       e[4U] = o41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
       Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec256 *rn = pre + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec256 *rn5 = pre + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec256 *rn = pre + 10U;
+      Lib_IntVector_Intrinsics_vec256 *rn5 = pre + 15U;
       Lib_IntVector_Intrinsics_vec256 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec256 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec256 r2 = rn[2U];
@@ -1419,37 +1341,28 @@ Hacl_Poly1305_256_poly1305_update(
       Lib_IntVector_Intrinsics_vec256 t2 = a24;
       Lib_IntVector_Intrinsics_vec256 t3 = a34;
       Lib_IntVector_Intrinsics_vec256 t4 = a44;
-      Lib_IntVector_Intrinsics_vec256
-      mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec256
-      z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec256
-      z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
       Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec256
-      z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec256
-      z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec256 o01 = x02;
@@ -1487,41 +1400,37 @@ Hacl_Poly1305_256_poly1305_update(
   }
   uint32_t len1 = len - len0;
   uint8_t *t1 = text + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t1 + i * (uint32_t)16U;
+    uint8_t *block = t1 + i * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -1532,12 +1441,12 @@ Hacl_Poly1305_256_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
     Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
     Lib_IntVector_Intrinsics_vec256 *r = pre;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1652,37 +1561,28 @@ Hacl_Poly1305_256_poly1305_update(
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1696,41 +1596,37 @@ Hacl_Poly1305_256_poly1305_update(
     acc[3U] = o3;
     acc[4U] = o4;
   }
-  if (rem > (uint32_t)0U)
+  if (rem > 0U)
   {
-    uint8_t *last = t1 + nb * (uint32_t)16U;
+    uint8_t *last = t1 + nb * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -1741,12 +1637,12 @@ Hacl_Poly1305_256_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem * 8U % 26U;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
-    Lib_IntVector_Intrinsics_vec256 fi = e[rem * (uint32_t)8U / (uint32_t)26U];
-    e[rem * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec256 fi = e[rem * 8U / 26U];
+    e[rem * 8U / 26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
     Lib_IntVector_Intrinsics_vec256 *r = pre;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1861,37 +1757,28 @@ Hacl_Poly1305_256_poly1305_update(
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1916,7 +1803,7 @@ Hacl_Poly1305_256_poly1305_finish(
 )
 {
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  uint8_t *ks = key + (uint32_t)16U;
+  uint8_t *ks = key + 16U;
   Lib_IntVector_Intrinsics_vec256 f0 = acc[0U];
   Lib_IntVector_Intrinsics_vec256 f13 = acc[1U];
   Lib_IntVector_Intrinsics_vec256 f23 = acc[2U];
@@ -1927,41 +1814,36 @@ Hacl_Poly1305_256_poly1305_finish(
   Lib_IntVector_Intrinsics_vec256
   tmp00 =
     Lib_IntVector_Intrinsics_vec256_and(l0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c00 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c00 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec256 l1 = Lib_IntVector_Intrinsics_vec256_add64(f13, c00);
   Lib_IntVector_Intrinsics_vec256
   tmp10 =
     Lib_IntVector_Intrinsics_vec256_and(l1,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c10 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c10 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec256 l2 = Lib_IntVector_Intrinsics_vec256_add64(f23, c10);
   Lib_IntVector_Intrinsics_vec256
   tmp20 =
     Lib_IntVector_Intrinsics_vec256_and(l2,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c20 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c20 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec256 l3 = Lib_IntVector_Intrinsics_vec256_add64(f33, c20);
   Lib_IntVector_Intrinsics_vec256
   tmp30 =
     Lib_IntVector_Intrinsics_vec256_and(l3,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c30 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c30 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec256 l4 = Lib_IntVector_Intrinsics_vec256_add64(f40, c30);
   Lib_IntVector_Intrinsics_vec256
   tmp40 =
     Lib_IntVector_Intrinsics_vec256_and(l4,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c40 = Lib_IntVector_Intrinsics_vec256_shift_right64(l4, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c40 = Lib_IntVector_Intrinsics_vec256_shift_right64(l4, 26U);
   Lib_IntVector_Intrinsics_vec256
   f010 =
     Lib_IntVector_Intrinsics_vec256_add64(tmp00,
-      Lib_IntVector_Intrinsics_vec256_smul64(c40, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec256_smul64(c40, 5ULL));
   Lib_IntVector_Intrinsics_vec256 f110 = tmp10;
   Lib_IntVector_Intrinsics_vec256 f210 = tmp20;
   Lib_IntVector_Intrinsics_vec256 f310 = tmp30;
@@ -1971,49 +1853,42 @@ Hacl_Poly1305_256_poly1305_finish(
   Lib_IntVector_Intrinsics_vec256
   tmp0 =
     Lib_IntVector_Intrinsics_vec256_and(l,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec256 l5 = Lib_IntVector_Intrinsics_vec256_add64(f110, c0);
   Lib_IntVector_Intrinsics_vec256
   tmp1 =
     Lib_IntVector_Intrinsics_vec256_and(l5,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l5, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l5, 26U);
   Lib_IntVector_Intrinsics_vec256 l6 = Lib_IntVector_Intrinsics_vec256_add64(f210, c1);
   Lib_IntVector_Intrinsics_vec256
   tmp2 =
     Lib_IntVector_Intrinsics_vec256_and(l6,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l6, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l6, 26U);
   Lib_IntVector_Intrinsics_vec256 l7 = Lib_IntVector_Intrinsics_vec256_add64(f310, c2);
   Lib_IntVector_Intrinsics_vec256
   tmp3 =
     Lib_IntVector_Intrinsics_vec256_and(l7,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l7, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l7, 26U);
   Lib_IntVector_Intrinsics_vec256 l8 = Lib_IntVector_Intrinsics_vec256_add64(f410, c3);
   Lib_IntVector_Intrinsics_vec256
   tmp4 =
     Lib_IntVector_Intrinsics_vec256_and(l8,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l8, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l8, 26U);
   Lib_IntVector_Intrinsics_vec256
   f02 =
     Lib_IntVector_Intrinsics_vec256_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec256_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec256_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec256 f12 = tmp1;
   Lib_IntVector_Intrinsics_vec256 f22 = tmp2;
   Lib_IntVector_Intrinsics_vec256 f32 = tmp3;
   Lib_IntVector_Intrinsics_vec256 f42 = tmp4;
-  Lib_IntVector_Intrinsics_vec256
-  mh = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  ml = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffffbU);
+  Lib_IntVector_Intrinsics_vec256 mh = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 ml = Lib_IntVector_Intrinsics_vec256_load64(0x3fffffbULL);
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_eq64(f42, mh);
   Lib_IntVector_Intrinsics_vec256
   mask1 =
@@ -2053,29 +1928,29 @@ Hacl_Poly1305_256_poly1305_finish(
   Lib_IntVector_Intrinsics_vec256 f2 = acc[2U];
   Lib_IntVector_Intrinsics_vec256 f3 = acc[3U];
   Lib_IntVector_Intrinsics_vec256 f4 = acc[4U];
-  uint64_t f01 = Lib_IntVector_Intrinsics_vec256_extract64(f00, (uint32_t)0U);
-  uint64_t f112 = Lib_IntVector_Intrinsics_vec256_extract64(f1, (uint32_t)0U);
-  uint64_t f212 = Lib_IntVector_Intrinsics_vec256_extract64(f2, (uint32_t)0U);
-  uint64_t f312 = Lib_IntVector_Intrinsics_vec256_extract64(f3, (uint32_t)0U);
-  uint64_t f41 = Lib_IntVector_Intrinsics_vec256_extract64(f4, (uint32_t)0U);
-  uint64_t lo = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U;
-  uint64_t hi = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U;
+  uint64_t f01 = Lib_IntVector_Intrinsics_vec256_extract64(f00, 0U);
+  uint64_t f112 = Lib_IntVector_Intrinsics_vec256_extract64(f1, 0U);
+  uint64_t f212 = Lib_IntVector_Intrinsics_vec256_extract64(f2, 0U);
+  uint64_t f312 = Lib_IntVector_Intrinsics_vec256_extract64(f3, 0U);
+  uint64_t f41 = Lib_IntVector_Intrinsics_vec256_extract64(f4, 0U);
+  uint64_t lo = (f01 | f112 << 26U) | f212 << 52U;
+  uint64_t hi = (f212 >> 12U | f312 << 14U) | f41 << 40U;
   uint64_t f10 = lo;
   uint64_t f11 = hi;
   uint64_t u0 = load64_le(ks);
   uint64_t lo0 = u0;
-  uint64_t u = load64_le(ks + (uint32_t)8U);
+  uint64_t u = load64_le(ks + 8U);
   uint64_t hi0 = u;
   uint64_t f20 = lo0;
   uint64_t f21 = hi0;
   uint64_t r0 = f10 + f20;
   uint64_t r1 = f11 + f21;
-  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U;
+  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> 63U;
   uint64_t r11 = r1 + c;
   uint64_t f30 = r0;
   uint64_t f31 = r11;
   store64_le(tag, f30);
-  store64_le(tag + (uint32_t)8U, f31);
+  store64_le(tag + 8U, f31);
 }
 
 void Hacl_Poly1305_256_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
diff --git a/src/Hacl_Poly1305_32.c b/src/Hacl_Poly1305_32.c
index 5192559b..9761e157 100644
--- a/src/Hacl_Poly1305_32.c
+++ b/src/Hacl_Poly1305_32.c
@@ -28,32 +28,32 @@
 void Hacl_Poly1305_32_poly1305_init(uint64_t *ctx, uint8_t *key)
 {
   uint64_t *acc = ctx;
-  uint64_t *pre = ctx + (uint32_t)5U;
+  uint64_t *pre = ctx + 5U;
   uint8_t *kr = key;
-  acc[0U] = (uint64_t)0U;
-  acc[1U] = (uint64_t)0U;
-  acc[2U] = (uint64_t)0U;
-  acc[3U] = (uint64_t)0U;
-  acc[4U] = (uint64_t)0U;
+  acc[0U] = 0ULL;
+  acc[1U] = 0ULL;
+  acc[2U] = 0ULL;
+  acc[3U] = 0ULL;
+  acc[4U] = 0ULL;
   uint64_t u0 = load64_le(kr);
   uint64_t lo = u0;
-  uint64_t u = load64_le(kr + (uint32_t)8U);
+  uint64_t u = load64_le(kr + 8U);
   uint64_t hi = u;
-  uint64_t mask0 = (uint64_t)0x0ffffffc0fffffffU;
-  uint64_t mask1 = (uint64_t)0x0ffffffc0ffffffcU;
+  uint64_t mask0 = 0x0ffffffc0fffffffULL;
+  uint64_t mask1 = 0x0ffffffc0ffffffcULL;
   uint64_t lo1 = lo & mask0;
   uint64_t hi1 = hi & mask1;
   uint64_t *r = pre;
-  uint64_t *r5 = pre + (uint32_t)5U;
-  uint64_t *rn = pre + (uint32_t)10U;
-  uint64_t *rn_5 = pre + (uint32_t)15U;
+  uint64_t *r5 = pre + 5U;
+  uint64_t *rn = pre + 10U;
+  uint64_t *rn_5 = pre + 15U;
   uint64_t r_vec0 = lo1;
   uint64_t r_vec1 = hi1;
-  uint64_t f00 = r_vec0 & (uint64_t)0x3ffffffU;
-  uint64_t f10 = r_vec0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-  uint64_t f20 = r_vec0 >> (uint32_t)52U | (r_vec1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-  uint64_t f30 = r_vec1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-  uint64_t f40 = r_vec1 >> (uint32_t)40U;
+  uint64_t f00 = r_vec0 & 0x3ffffffULL;
+  uint64_t f10 = r_vec0 >> 26U & 0x3ffffffULL;
+  uint64_t f20 = r_vec0 >> 52U | (r_vec1 & 0x3fffULL) << 12U;
+  uint64_t f30 = r_vec1 >> 14U & 0x3ffffffULL;
+  uint64_t f40 = r_vec1 >> 40U;
   uint64_t f0 = f00;
   uint64_t f1 = f10;
   uint64_t f2 = f20;
@@ -69,11 +69,11 @@ void Hacl_Poly1305_32_poly1305_init(uint64_t *ctx, uint8_t *key)
   uint64_t f22 = r[2U];
   uint64_t f23 = r[3U];
   uint64_t f24 = r[4U];
-  r5[0U] = f200 * (uint64_t)5U;
-  r5[1U] = f21 * (uint64_t)5U;
-  r5[2U] = f22 * (uint64_t)5U;
-  r5[3U] = f23 * (uint64_t)5U;
-  r5[4U] = f24 * (uint64_t)5U;
+  r5[0U] = f200 * 5ULL;
+  r5[1U] = f21 * 5ULL;
+  r5[2U] = f22 * 5ULL;
+  r5[3U] = f23 * 5ULL;
+  r5[4U] = f24 * 5ULL;
   rn[0U] = r[0U];
   rn[1U] = r[1U];
   rn[2U] = r[2U];
@@ -88,20 +88,20 @@ void Hacl_Poly1305_32_poly1305_init(uint64_t *ctx, uint8_t *key)
 
 void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
 {
-  uint64_t *pre = ctx + (uint32_t)5U;
+  uint64_t *pre = ctx + 5U;
   uint64_t *acc = ctx;
   uint64_t e[5U] = { 0U };
   uint64_t u0 = load64_le(text);
   uint64_t lo = u0;
-  uint64_t u = load64_le(text + (uint32_t)8U);
+  uint64_t u = load64_le(text + 8U);
   uint64_t hi = u;
   uint64_t f0 = lo;
   uint64_t f1 = hi;
-  uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-  uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-  uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-  uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-  uint64_t f40 = f1 >> (uint32_t)40U;
+  uint64_t f010 = f0 & 0x3ffffffULL;
+  uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+  uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+  uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+  uint64_t f40 = f1 >> 40U;
   uint64_t f01 = f010;
   uint64_t f111 = f110;
   uint64_t f2 = f20;
@@ -112,12 +112,12 @@ void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   uint64_t mask = b;
   uint64_t f4 = e[4U];
   e[4U] = f4 | mask;
   uint64_t *r = pre;
-  uint64_t *r5 = pre + (uint32_t)5U;
+  uint64_t *r5 = pre + 5U;
   uint64_t r0 = r[0U];
   uint64_t r1 = r[1U];
   uint64_t r2 = r[2U];
@@ -172,28 +172,28 @@ void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
   uint64_t t2 = a26;
   uint64_t t3 = a36;
   uint64_t t4 = a46;
-  uint64_t mask26 = (uint64_t)0x3ffffffU;
-  uint64_t z0 = t0 >> (uint32_t)26U;
-  uint64_t z1 = t3 >> (uint32_t)26U;
+  uint64_t mask26 = 0x3ffffffULL;
+  uint64_t z0 = t0 >> 26U;
+  uint64_t z1 = t3 >> 26U;
   uint64_t x0 = t0 & mask26;
   uint64_t x3 = t3 & mask26;
   uint64_t x1 = t1 + z0;
   uint64_t x4 = t4 + z1;
-  uint64_t z01 = x1 >> (uint32_t)26U;
-  uint64_t z11 = x4 >> (uint32_t)26U;
-  uint64_t t = z11 << (uint32_t)2U;
+  uint64_t z01 = x1 >> 26U;
+  uint64_t z11 = x4 >> 26U;
+  uint64_t t = z11 << 2U;
   uint64_t z12 = z11 + t;
   uint64_t x11 = x1 & mask26;
   uint64_t x41 = x4 & mask26;
   uint64_t x2 = t2 + z01;
   uint64_t x01 = x0 + z12;
-  uint64_t z02 = x2 >> (uint32_t)26U;
-  uint64_t z13 = x01 >> (uint32_t)26U;
+  uint64_t z02 = x2 >> 26U;
+  uint64_t z13 = x01 >> 26U;
   uint64_t x21 = x2 & mask26;
   uint64_t x02 = x01 & mask26;
   uint64_t x31 = x3 + z02;
   uint64_t x12 = x11 + z13;
-  uint64_t z03 = x31 >> (uint32_t)26U;
+  uint64_t z03 = x31 >> 26U;
   uint64_t x32 = x31 & mask26;
   uint64_t x42 = x41 + z03;
   uint64_t o0 = x02;
@@ -210,25 +210,25 @@ void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
 
 void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
 {
-  uint64_t *pre = ctx + (uint32_t)5U;
+  uint64_t *pre = ctx + 5U;
   uint64_t *acc = ctx;
-  uint32_t nb = len / (uint32_t)16U;
-  uint32_t rem = len % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len / 16U;
+  uint32_t rem = len % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = text + i * (uint32_t)16U;
+    uint8_t *block = text + i * 16U;
     uint64_t e[5U] = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -239,12 +239,12 @@ void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     uint64_t mask = b;
     uint64_t f4 = e[4U];
     e[4U] = f4 | mask;
     uint64_t *r = pre;
-    uint64_t *r5 = pre + (uint32_t)5U;
+    uint64_t *r5 = pre + 5U;
     uint64_t r0 = r[0U];
     uint64_t r1 = r[1U];
     uint64_t r2 = r[2U];
@@ -299,28 +299,28 @@ void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -334,23 +334,23 @@ void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text
     acc[3U] = o3;
     acc[4U] = o4;
   }
-  if (rem > (uint32_t)0U)
+  if (rem > 0U)
   {
-    uint8_t *last = text + nb * (uint32_t)16U;
+    uint8_t *last = text + nb * 16U;
     uint64_t e[5U] = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -361,12 +361,12 @@ void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem * 8U % 26U;
     uint64_t mask = b;
-    uint64_t fi = e[rem * (uint32_t)8U / (uint32_t)26U];
-    e[rem * (uint32_t)8U / (uint32_t)26U] = fi | mask;
+    uint64_t fi = e[rem * 8U / 26U];
+    e[rem * 8U / 26U] = fi | mask;
     uint64_t *r = pre;
-    uint64_t *r5 = pre + (uint32_t)5U;
+    uint64_t *r5 = pre + 5U;
     uint64_t r0 = r[0U];
     uint64_t r1 = r[1U];
     uint64_t r2 = r[2U];
@@ -421,28 +421,28 @@ void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -462,54 +462,54 @@ void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text
 void Hacl_Poly1305_32_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx)
 {
   uint64_t *acc = ctx;
-  uint8_t *ks = key + (uint32_t)16U;
+  uint8_t *ks = key + 16U;
   uint64_t f0 = acc[0U];
   uint64_t f13 = acc[1U];
   uint64_t f23 = acc[2U];
   uint64_t f33 = acc[3U];
   uint64_t f40 = acc[4U];
-  uint64_t l0 = f0 + (uint64_t)0U;
-  uint64_t tmp00 = l0 & (uint64_t)0x3ffffffU;
-  uint64_t c00 = l0 >> (uint32_t)26U;
+  uint64_t l0 = f0 + 0ULL;
+  uint64_t tmp00 = l0 & 0x3ffffffULL;
+  uint64_t c00 = l0 >> 26U;
   uint64_t l1 = f13 + c00;
-  uint64_t tmp10 = l1 & (uint64_t)0x3ffffffU;
-  uint64_t c10 = l1 >> (uint32_t)26U;
+  uint64_t tmp10 = l1 & 0x3ffffffULL;
+  uint64_t c10 = l1 >> 26U;
   uint64_t l2 = f23 + c10;
-  uint64_t tmp20 = l2 & (uint64_t)0x3ffffffU;
-  uint64_t c20 = l2 >> (uint32_t)26U;
+  uint64_t tmp20 = l2 & 0x3ffffffULL;
+  uint64_t c20 = l2 >> 26U;
   uint64_t l3 = f33 + c20;
-  uint64_t tmp30 = l3 & (uint64_t)0x3ffffffU;
-  uint64_t c30 = l3 >> (uint32_t)26U;
+  uint64_t tmp30 = l3 & 0x3ffffffULL;
+  uint64_t c30 = l3 >> 26U;
   uint64_t l4 = f40 + c30;
-  uint64_t tmp40 = l4 & (uint64_t)0x3ffffffU;
-  uint64_t c40 = l4 >> (uint32_t)26U;
-  uint64_t f010 = tmp00 + c40 * (uint64_t)5U;
+  uint64_t tmp40 = l4 & 0x3ffffffULL;
+  uint64_t c40 = l4 >> 26U;
+  uint64_t f010 = tmp00 + c40 * 5ULL;
   uint64_t f110 = tmp10;
   uint64_t f210 = tmp20;
   uint64_t f310 = tmp30;
   uint64_t f410 = tmp40;
-  uint64_t l = f010 + (uint64_t)0U;
-  uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
-  uint64_t c0 = l >> (uint32_t)26U;
+  uint64_t l = f010 + 0ULL;
+  uint64_t tmp0 = l & 0x3ffffffULL;
+  uint64_t c0 = l >> 26U;
   uint64_t l5 = f110 + c0;
-  uint64_t tmp1 = l5 & (uint64_t)0x3ffffffU;
-  uint64_t c1 = l5 >> (uint32_t)26U;
+  uint64_t tmp1 = l5 & 0x3ffffffULL;
+  uint64_t c1 = l5 >> 26U;
   uint64_t l6 = f210 + c1;
-  uint64_t tmp2 = l6 & (uint64_t)0x3ffffffU;
-  uint64_t c2 = l6 >> (uint32_t)26U;
+  uint64_t tmp2 = l6 & 0x3ffffffULL;
+  uint64_t c2 = l6 >> 26U;
   uint64_t l7 = f310 + c2;
-  uint64_t tmp3 = l7 & (uint64_t)0x3ffffffU;
-  uint64_t c3 = l7 >> (uint32_t)26U;
+  uint64_t tmp3 = l7 & 0x3ffffffULL;
+  uint64_t c3 = l7 >> 26U;
   uint64_t l8 = f410 + c3;
-  uint64_t tmp4 = l8 & (uint64_t)0x3ffffffU;
-  uint64_t c4 = l8 >> (uint32_t)26U;
-  uint64_t f02 = tmp0 + c4 * (uint64_t)5U;
+  uint64_t tmp4 = l8 & 0x3ffffffULL;
+  uint64_t c4 = l8 >> 26U;
+  uint64_t f02 = tmp0 + c4 * 5ULL;
   uint64_t f12 = tmp1;
   uint64_t f22 = tmp2;
   uint64_t f32 = tmp3;
   uint64_t f42 = tmp4;
-  uint64_t mh = (uint64_t)0x3ffffffU;
-  uint64_t ml = (uint64_t)0x3fffffbU;
+  uint64_t mh = 0x3ffffffULL;
+  uint64_t ml = 0x3fffffbULL;
   uint64_t mask = FStar_UInt64_eq_mask(f42, mh);
   uint64_t mask1 = mask & FStar_UInt64_eq_mask(f32, mh);
   uint64_t mask2 = mask1 & FStar_UInt64_eq_mask(f22, mh);
@@ -542,24 +542,24 @@ void Hacl_Poly1305_32_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx)
   uint64_t f212 = f2;
   uint64_t f312 = f3;
   uint64_t f41 = f4;
-  uint64_t lo = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U;
-  uint64_t hi = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U;
+  uint64_t lo = (f01 | f112 << 26U) | f212 << 52U;
+  uint64_t hi = (f212 >> 12U | f312 << 14U) | f41 << 40U;
   uint64_t f10 = lo;
   uint64_t f11 = hi;
   uint64_t u0 = load64_le(ks);
   uint64_t lo0 = u0;
-  uint64_t u = load64_le(ks + (uint32_t)8U);
+  uint64_t u = load64_le(ks + 8U);
   uint64_t hi0 = u;
   uint64_t f20 = lo0;
   uint64_t f21 = hi0;
   uint64_t r0 = f10 + f20;
   uint64_t r1 = f11 + f21;
-  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U;
+  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> 63U;
   uint64_t r11 = r1 + c;
   uint64_t f30 = r0;
   uint64_t f31 = r11;
   store64_le(tag, f30);
-  store64_le(tag + (uint32_t)8U, f31);
+  store64_le(tag + 8U, f31);
 }
 
 void Hacl_Poly1305_32_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
diff --git a/src/Hacl_RSAPSS.c b/src/Hacl_RSAPSS.c
index ceb9a6f0..c2e6b022 100644
--- a/src/Hacl_RSAPSS.c
+++ b/src/Hacl_RSAPSS.c
@@ -35,51 +35,51 @@ static inline uint32_t hash_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)20U;
+        return 20U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     default:
       {
@@ -126,48 +126,48 @@ mgf_hash(
   uint8_t *res
 )
 {
-  KRML_CHECK_SIZE(sizeof (uint8_t), len + (uint32_t)4U);
-  uint8_t mgfseed_counter[len + (uint32_t)4U];
-  memset(mgfseed_counter, 0U, (len + (uint32_t)4U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), len + 4U);
+  uint8_t mgfseed_counter[len + 4U];
+  memset(mgfseed_counter, 0U, (len + 4U) * sizeof (uint8_t));
   memcpy(mgfseed_counter, mgfseed, len * sizeof (uint8_t));
   uint32_t hLen = hash_len(a);
-  uint32_t n = (maskLen - (uint32_t)1U) / hLen + (uint32_t)1U;
+  uint32_t n = (maskLen - 1U) / hLen + 1U;
   uint32_t accLen = n * hLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), accLen);
   uint8_t acc[accLen];
   memset(acc, 0U, accLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
     uint8_t *acc_i = acc + i * hLen;
     uint8_t *c = mgfseed_counter + len;
-    c[0U] = (uint8_t)(i >> (uint32_t)24U);
-    c[1U] = (uint8_t)(i >> (uint32_t)16U);
-    c[2U] = (uint8_t)(i >> (uint32_t)8U);
+    c[0U] = (uint8_t)(i >> 24U);
+    c[1U] = (uint8_t)(i >> 16U);
+    c[2U] = (uint8_t)(i >> 8U);
     c[3U] = (uint8_t)i;
-    hash(a, acc_i, len + (uint32_t)4U, mgfseed_counter);
+    hash(a, acc_i, len + 4U, mgfseed_counter);
   }
   memcpy(res, acc, maskLen * sizeof (uint8_t));
 }
 
 static inline uint64_t check_num_bits_u64(uint32_t bs, uint64_t *b)
 {
-  uint32_t bLen = (bs - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  if (bs == (uint32_t)64U * bLen)
+  uint32_t bLen = (bs - 1U) / 64U + 1U;
+  if (bs == 64U * bLen)
   {
-    return (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    return 0xFFFFFFFFFFFFFFFFULL;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
   uint64_t b2[bLen];
   memset(b2, 0U, bLen * sizeof (uint64_t));
-  uint32_t i0 = bs / (uint32_t)64U;
-  uint32_t j = bs % (uint32_t)64U;
-  b2[i0] = b2[i0] | (uint64_t)1U << j;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+  uint32_t i0 = bs / 64U;
+  uint32_t j = bs % 64U;
+  b2[i0] = b2[i0] | 1ULL << j;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < bLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t res = acc;
   return res;
@@ -175,21 +175,21 @@ static inline uint64_t check_num_bits_u64(uint32_t bs, uint64_t *b)
 
 static inline uint64_t check_modulus_u64(uint32_t modBits, uint64_t *n)
 {
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint64_t bits0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bits0;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint64_t bits0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bits0;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t b2[nLen];
   memset(b2, 0U, nLen * sizeof (uint64_t));
-  uint32_t i0 = (modBits - (uint32_t)1U) / (uint32_t)64U;
-  uint32_t j = (modBits - (uint32_t)1U) % (uint32_t)64U;
-  b2[i0] = b2[i0] | (uint64_t)1U << j;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < nLen; i++)
+  uint32_t i0 = (modBits - 1U) / 64U;
+  uint32_t j = (modBits - 1U) % 64U;
+  b2[i0] = b2[i0] | 1ULL << j;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < nLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(b2[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t res = acc;
   uint64_t m1 = res;
@@ -199,12 +199,12 @@ static inline uint64_t check_modulus_u64(uint32_t modBits, uint64_t *n)
 
 static inline uint64_t check_exponent_u64(uint32_t eBits, uint64_t *e)
 {
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), eLen);
   uint64_t bn_zero[eLen];
   memset(bn_zero, 0U, eLen * sizeof (uint64_t));
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < eLen; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < eLen; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(e[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -231,39 +231,39 @@ pss_encode(
   KRML_CHECK_SIZE(sizeof (uint8_t), hLen);
   uint8_t m1Hash[hLen];
   memset(m1Hash, 0U, hLen * sizeof (uint8_t));
-  uint32_t m1Len = (uint32_t)8U + hLen + saltLen;
+  uint32_t m1Len = 8U + hLen + saltLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), m1Len);
   uint8_t m1[m1Len];
   memset(m1, 0U, m1Len * sizeof (uint8_t));
-  hash(a, m1 + (uint32_t)8U, msgLen, msg);
-  memcpy(m1 + (uint32_t)8U + hLen, salt, saltLen * sizeof (uint8_t));
+  hash(a, m1 + 8U, msgLen, msg);
+  memcpy(m1 + 8U + hLen, salt, saltLen * sizeof (uint8_t));
   hash(a, m1Hash, m1Len, m1);
-  uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t dbLen = emLen - hLen - (uint32_t)1U;
+  uint32_t emLen = (emBits - 1U) / 8U + 1U;
+  uint32_t dbLen = emLen - hLen - 1U;
   KRML_CHECK_SIZE(sizeof (uint8_t), dbLen);
   uint8_t db[dbLen];
   memset(db, 0U, dbLen * sizeof (uint8_t));
-  uint32_t last_before_salt = dbLen - saltLen - (uint32_t)1U;
-  db[last_before_salt] = (uint8_t)1U;
-  memcpy(db + last_before_salt + (uint32_t)1U, salt, saltLen * sizeof (uint8_t));
+  uint32_t last_before_salt = dbLen - saltLen - 1U;
+  db[last_before_salt] = 1U;
+  memcpy(db + last_before_salt + 1U, salt, saltLen * sizeof (uint8_t));
   KRML_CHECK_SIZE(sizeof (uint8_t), dbLen);
   uint8_t dbMask[dbLen];
   memset(dbMask, 0U, dbLen * sizeof (uint8_t));
   mgf_hash(a, hLen, m1Hash, dbLen, dbMask);
-  for (uint32_t i = (uint32_t)0U; i < dbLen; i++)
+  for (uint32_t i = 0U; i < dbLen; i++)
   {
     uint8_t *os = db;
-    uint8_t x = db[i] ^ dbMask[i];
+    uint8_t x = (uint32_t)db[i] ^ (uint32_t)dbMask[i];
     os[i] = x;
   }
-  uint32_t msBits = emBits % (uint32_t)8U;
-  if (msBits > (uint32_t)0U)
+  uint32_t msBits = emBits % 8U;
+  if (msBits > 0U)
   {
-    db[0U] = db[0U] & (uint8_t)0xffU >> ((uint32_t)8U - msBits);
+    db[0U] = (uint32_t)db[0U] & 0xffU >> (8U - msBits);
   }
   memcpy(em, db, dbLen * sizeof (uint8_t));
   memcpy(em + dbLen, m1Hash, hLen * sizeof (uint8_t));
-  em[emLen - (uint32_t)1U] = (uint8_t)0xbcU;
+  em[emLen - 1U] = 0xbcU;
 }
 
 static inline bool
@@ -276,105 +276,100 @@ pss_verify(
   uint8_t *em
 )
 {
-  uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t msBits = emBits % (uint32_t)8U;
+  uint32_t emLen = (emBits - 1U) / 8U + 1U;
+  uint32_t msBits = emBits % 8U;
   uint8_t em_0;
-  if (msBits > (uint32_t)0U)
+  if (msBits > 0U)
   {
-    em_0 = em[0U] & (uint8_t)0xffU << msBits;
+    em_0 = (uint32_t)em[0U] & 0xffU << msBits;
   }
   else
   {
-    em_0 = (uint8_t)0U;
+    em_0 = 0U;
   }
-  uint8_t em_last = em[emLen - (uint32_t)1U];
-  if (emLen < saltLen + hash_len(a) + (uint32_t)2U)
+  uint8_t em_last = em[emLen - 1U];
+  if (emLen < saltLen + hash_len(a) + 2U)
   {
     return false;
   }
-  if (!(em_last == (uint8_t)0xbcU && em_0 == (uint8_t)0U))
+  if (!(em_last == 0xbcU && em_0 == 0U))
   {
     return false;
   }
-  uint32_t emLen1 = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t emLen1 = (emBits - 1U) / 8U + 1U;
   uint32_t hLen = hash_len(a);
   KRML_CHECK_SIZE(sizeof (uint8_t), hLen);
   uint8_t m1Hash0[hLen];
   memset(m1Hash0, 0U, hLen * sizeof (uint8_t));
-  uint32_t dbLen = emLen1 - hLen - (uint32_t)1U;
+  uint32_t dbLen = emLen1 - hLen - 1U;
   uint8_t *maskedDB = em;
   uint8_t *m1Hash = em + dbLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), dbLen);
   uint8_t dbMask[dbLen];
   memset(dbMask, 0U, dbLen * sizeof (uint8_t));
   mgf_hash(a, hLen, m1Hash, dbLen, dbMask);
-  for (uint32_t i = (uint32_t)0U; i < dbLen; i++)
+  for (uint32_t i = 0U; i < dbLen; i++)
   {
     uint8_t *os = dbMask;
-    uint8_t x = dbMask[i] ^ maskedDB[i];
+    uint8_t x = (uint32_t)dbMask[i] ^ (uint32_t)maskedDB[i];
     os[i] = x;
   }
-  uint32_t msBits1 = emBits % (uint32_t)8U;
-  if (msBits1 > (uint32_t)0U)
+  uint32_t msBits1 = emBits % 8U;
+  if (msBits1 > 0U)
   {
-    dbMask[0U] = dbMask[0U] & (uint8_t)0xffU >> ((uint32_t)8U - msBits1);
+    dbMask[0U] = (uint32_t)dbMask[0U] & 0xffU >> (8U - msBits1);
   }
-  uint32_t padLen = emLen1 - saltLen - hLen - (uint32_t)1U;
+  uint32_t padLen = emLen1 - saltLen - hLen - 1U;
   KRML_CHECK_SIZE(sizeof (uint8_t), padLen);
   uint8_t pad2[padLen];
   memset(pad2, 0U, padLen * sizeof (uint8_t));
-  pad2[padLen - (uint32_t)1U] = (uint8_t)0x01U;
+  pad2[padLen - 1U] = 0x01U;
   uint8_t *pad = dbMask;
   uint8_t *salt = dbMask + padLen;
-  uint8_t res = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < padLen; i++)
+  uint8_t res = 255U;
+  for (uint32_t i = 0U; i < padLen; i++)
   {
     uint8_t uu____0 = FStar_UInt8_eq_mask(pad[i], pad2[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint8_t z = res;
-  if (!(z == (uint8_t)255U))
+  if (!(z == 255U))
   {
     return false;
   }
-  uint32_t m1Len = (uint32_t)8U + hLen + saltLen;
+  uint32_t m1Len = 8U + hLen + saltLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), m1Len);
   uint8_t m1[m1Len];
   memset(m1, 0U, m1Len * sizeof (uint8_t));
-  hash(a, m1 + (uint32_t)8U, msgLen, msg);
-  memcpy(m1 + (uint32_t)8U + hLen, salt, saltLen * sizeof (uint8_t));
+  hash(a, m1 + 8U, msgLen, msg);
+  memcpy(m1 + 8U + hLen, salt, saltLen * sizeof (uint8_t));
   hash(a, m1Hash0, m1Len, m1);
-  uint8_t res0 = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < hLen; i++)
+  uint8_t res0 = 255U;
+  for (uint32_t i = 0U; i < hLen; i++)
   {
     uint8_t uu____1 = FStar_UInt8_eq_mask(m1Hash0[i], m1Hash[i]);
-    res0 = uu____1 & res0;
+    res0 = (uint32_t)uu____1 & (uint32_t)res0;
   }
   uint8_t z0 = res0;
-  return z0 == (uint8_t)255U;
+  return z0 == 255U;
 }
 
 static inline bool
 load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb, uint64_t *pkey)
 {
-  uint32_t nbLen = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t ebLen = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nbLen = (modBits - 1U) / 8U + 1U;
+  uint32_t ebLen = (eBits - 1U) / 8U + 1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
   uint64_t *n = pkey;
   uint64_t *r2 = pkey + nLen;
   uint64_t *e = pkey + nLen + nLen;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen, nb, n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U)
-    / (uint32_t)64U
-    + (uint32_t)1U,
-    modBits - (uint32_t)1U,
-    n,
-    r2);
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen, eb, e);
   uint64_t m0 = check_modulus_u64(modBits, n);
   uint64_t m1 = check_exponent_u64(eBits, e);
   uint64_t m = m0 & m1;
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static inline bool
@@ -388,16 +383,16 @@ load_skey(
   uint64_t *skey
 )
 {
-  uint32_t dbLen = (dBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t dbLen = (dBits - 1U) / 8U + 1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
   uint32_t pkeyLen = nLen + nLen + eLen;
   uint64_t *pkey = skey;
   uint64_t *d = skey + pkeyLen;
   bool b = load_pkey(modBits, eBits, nb, eb, pkey);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(dbLen, db, d);
   uint64_t m1 = check_exponent_u64(dBits, d);
-  return b && m1 == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return b && m1 == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -435,45 +430,36 @@ Hacl_RSAPSS_rsapss_sign(
 {
   uint32_t hLen = hash_len(a);
   bool
-  b =
-    saltLen
-    <= (uint32_t)0xffffffffU - hLen - (uint32_t)8U
-    &&
-      saltLen
-      + hLen
-      + (uint32_t)2U
-      <= (modBits - (uint32_t)1U - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  b = saltLen <= 0xffffffffU - hLen - 8U && saltLen + hLen + 2U <= (modBits - 1U - 1U) / 8U + 1U;
   if (b)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
     uint64_t m[nLen];
     memset(m, 0U, nLen * sizeof (uint64_t));
-    uint32_t emBits = modBits - (uint32_t)1U;
-    uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+    uint32_t emBits = modBits - 1U;
+    uint32_t emLen = (emBits - 1U) / 8U + 1U;
     KRML_CHECK_SIZE(sizeof (uint8_t), emLen);
     uint8_t em[emLen];
     memset(em, 0U, emLen * sizeof (uint8_t));
     pss_encode(a, saltLen, salt, msgLen, msg, emBits, em);
     Hacl_Bignum_Convert_bn_from_bytes_be_uint64(emLen, em, m);
-    uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t k = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+    uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+    uint32_t k = (modBits - 1U) / 8U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen1);
     uint64_t s[nLen1];
     memset(s, 0U, nLen1 * sizeof (uint64_t));
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen1);
     uint64_t m_[nLen1];
     memset(m_, 0U, nLen1 * sizeof (uint64_t));
-    uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen2 = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
     uint64_t *n = skey;
     uint64_t *r2 = skey + nLen2;
     uint64_t *e = skey + nLen2 + nLen2;
     uint64_t *d = skey + nLen2 + nLen2 + eLen;
     uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((modBits - (uint32_t)1U)
-      / (uint32_t)64U
-      + (uint32_t)1U,
+    Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((modBits - 1U) / 64U + 1U,
       n,
       mu,
       r2,
@@ -482,9 +468,7 @@ Hacl_RSAPSS_rsapss_sign(
       d,
       s);
     uint64_t mu0 = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - (uint32_t)1U)
-      / (uint32_t)64U
-      + (uint32_t)1U,
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - 1U) / 64U + 1U,
       n,
       mu0,
       r2,
@@ -492,22 +476,22 @@ Hacl_RSAPSS_rsapss_sign(
       eBits,
       e,
       m_);
-    uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-    for (uint32_t i = (uint32_t)0U; i < nLen2; i++)
+    uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+    for (uint32_t i = 0U; i < nLen2; i++)
     {
       uint64_t uu____0 = FStar_UInt64_eq_mask(m[i], m_[i]);
       mask = uu____0 & mask;
     }
     uint64_t mask1 = mask;
     uint64_t eq_m = mask1;
-    for (uint32_t i = (uint32_t)0U; i < nLen2; i++)
+    for (uint32_t i = 0U; i < nLen2; i++)
     {
       uint64_t *os = s;
       uint64_t x = s[i];
       uint64_t x0 = eq_m & x;
       os[i] = x0;
     }
-    bool eq_b = eq_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    bool eq_b = eq_m == 0xFFFFFFFFFFFFFFFFULL;
     Hacl_Bignum_Convert_bn_to_bytes_be_uint64(k, s, sgnt);
     bool eq_b0 = eq_b;
     return eq_b0;
@@ -547,42 +531,36 @@ Hacl_RSAPSS_rsapss_verify(
 )
 {
   uint32_t hLen = hash_len(a);
-  bool
-  b =
-    saltLen
-    <= (uint32_t)0xffffffffU - hLen - (uint32_t)8U
-    && sgntLen == (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  bool b = saltLen <= 0xffffffffU - hLen - 8U && sgntLen == (modBits - 1U) / 8U + 1U;
   if (b)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
     uint64_t m[nLen];
     memset(m, 0U, nLen * sizeof (uint64_t));
-    uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t k = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+    uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+    uint32_t k = (modBits - 1U) / 8U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen1);
     uint64_t s[nLen1];
     memset(s, 0U, nLen1 * sizeof (uint64_t));
     Hacl_Bignum_Convert_bn_from_bytes_be_uint64(k, sgnt, s);
-    uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen2 = (modBits - 1U) / 64U + 1U;
     uint64_t *n = pkey;
     uint64_t *r2 = pkey + nLen2;
     uint64_t *e = pkey + nLen2 + nLen2;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < nLen2; i++)
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < nLen2; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(s[i], n[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(s[i], n[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t mask = acc;
     bool res;
-    if (mask == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+    if (mask == 0xFFFFFFFFFFFFFFFFULL)
     {
       uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-      Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - (uint32_t)1U)
-        / (uint32_t)64U
-        + (uint32_t)1U,
+      Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - 1U) / 64U + 1U,
         n,
         mu,
         r2,
@@ -591,17 +569,17 @@ Hacl_RSAPSS_rsapss_verify(
         e,
         m);
       bool ite;
-      if (!((modBits - (uint32_t)1U) % (uint32_t)8U == (uint32_t)0U))
+      if (!((modBits - 1U) % 8U == 0U))
       {
         ite = true;
       }
       else
       {
-        uint32_t i = (modBits - (uint32_t)1U) / (uint32_t)64U;
-        uint32_t j = (modBits - (uint32_t)1U) % (uint32_t)64U;
+        uint32_t i = (modBits - 1U) / 64U;
+        uint32_t j = (modBits - 1U) % 64U;
         uint64_t tmp = m[i];
-        uint64_t get_bit = tmp >> j & (uint64_t)1U;
-        ite = get_bit == (uint64_t)0U;
+        uint64_t get_bit = tmp >> j & 1ULL;
+        ite = get_bit == 0ULL;
       }
       if (ite)
       {
@@ -620,8 +598,8 @@ Hacl_RSAPSS_rsapss_verify(
     bool b10 = b1;
     if (b10)
     {
-      uint32_t emBits = modBits - (uint32_t)1U;
-      uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+      uint32_t emBits = modBits - 1U;
+      uint32_t emLen = (emBits - 1U) / 8U + 1U;
       KRML_CHECK_SIZE(sizeof (uint8_t), emLen);
       uint8_t em[emLen];
       memset(em, 0U, emLen * sizeof (uint8_t));
@@ -649,15 +627,11 @@ uint64_t
 *Hacl_RSAPSS_new_rsapss_load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb)
 {
   bool ite;
-  if ((uint32_t)1U < modBits && (uint32_t)0U < eBits)
+  if (1U < modBits && 0U < eBits)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    ite =
-      nLen
-      <= (uint32_t)33554431U
-      && eLen <= (uint32_t)67108863U
-      && nLen + nLen <= (uint32_t)0xffffffffU - eLen;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
+    ite = nLen <= 33554431U && eLen <= 67108863U && nLen + nLen <= 0xffffffffU - eLen;
   }
   else
   {
@@ -667,8 +641,8 @@ uint64_t
   {
     return NULL;
   }
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
   uint32_t pkeyLen = nLen + nLen + eLen;
   KRML_CHECK_SIZE(sizeof (uint64_t), pkeyLen);
   uint64_t *pkey = (uint64_t *)KRML_HOST_CALLOC(pkeyLen, sizeof (uint64_t));
@@ -678,24 +652,19 @@ uint64_t
   }
   uint64_t *pkey1 = pkey;
   uint64_t *pkey2 = pkey1;
-  uint32_t nbLen = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t ebLen = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nbLen = (modBits - 1U) / 8U + 1U;
+  uint32_t ebLen = (eBits - 1U) / 8U + 1U;
+  uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
   uint64_t *n = pkey2;
   uint64_t *r2 = pkey2 + nLen1;
   uint64_t *e = pkey2 + nLen1 + nLen1;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen, nb, n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U)
-    / (uint32_t)64U
-    + (uint32_t)1U,
-    modBits - (uint32_t)1U,
-    n,
-    r2);
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen, eb, e);
   uint64_t m0 = check_modulus_u64(modBits, n);
   uint64_t m1 = check_exponent_u64(eBits, e);
   uint64_t m = m0 & m1;
-  bool b = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool b = m == 0xFFFFFFFFFFFFFFFFULL;
   if (b)
   {
     return pkey2;
@@ -727,27 +696,23 @@ uint64_t
 )
 {
   bool ite0;
-  if ((uint32_t)1U < modBits && (uint32_t)0U < eBits)
+  if (1U < modBits && 0U < eBits)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    ite0 =
-      nLen
-      <= (uint32_t)33554431U
-      && eLen <= (uint32_t)67108863U
-      && nLen + nLen <= (uint32_t)0xffffffffU - eLen;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
+    ite0 = nLen <= 33554431U && eLen <= 67108863U && nLen + nLen <= 0xffffffffU - eLen;
   }
   else
   {
     ite0 = false;
   }
   bool ite;
-  if (ite0 && (uint32_t)0U < dBits)
+  if (ite0 && 0U < dBits)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t dLen = (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    ite = dLen <= (uint32_t)67108863U && (uint32_t)2U * nLen <= (uint32_t)0xffffffffU - eLen - dLen;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
+    uint32_t dLen = (dBits - 1U) / 64U + 1U;
+    ite = dLen <= 67108863U && 2U * nLen <= 0xffffffffU - eLen - dLen;
   }
   else
   {
@@ -757,9 +722,9 @@ uint64_t
   {
     return NULL;
   }
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t dLen = (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
+  uint32_t dLen = (dBits - 1U) / 64U + 1U;
   uint32_t skeyLen = nLen + nLen + eLen + dLen;
   KRML_CHECK_SIZE(sizeof (uint64_t), skeyLen);
   uint64_t *skey = (uint64_t *)KRML_HOST_CALLOC(skeyLen, sizeof (uint64_t));
@@ -769,33 +734,28 @@ uint64_t
   }
   uint64_t *skey1 = skey;
   uint64_t *skey2 = skey1;
-  uint32_t dbLen = (dBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen1 = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t dbLen = (dBits - 1U) / 8U + 1U;
+  uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen1 = (eBits - 1U) / 64U + 1U;
   uint32_t pkeyLen = nLen1 + nLen1 + eLen1;
   uint64_t *pkey = skey2;
   uint64_t *d = skey2 + pkeyLen;
-  uint32_t nbLen1 = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t ebLen1 = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nbLen1 = (modBits - 1U) / 8U + 1U;
+  uint32_t ebLen1 = (eBits - 1U) / 8U + 1U;
+  uint32_t nLen2 = (modBits - 1U) / 64U + 1U;
   uint64_t *n = pkey;
   uint64_t *r2 = pkey + nLen2;
   uint64_t *e = pkey + nLen2 + nLen2;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen1, nb, n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U)
-    / (uint32_t)64U
-    + (uint32_t)1U,
-    modBits - (uint32_t)1U,
-    n,
-    r2);
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen1, eb, e);
   uint64_t m0 = check_modulus_u64(modBits, n);
   uint64_t m10 = check_exponent_u64(eBits, e);
   uint64_t m = m0 & m10;
-  bool b = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool b = m == 0xFFFFFFFFFFFFFFFFULL;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(dbLen, db, d);
   uint64_t m1 = check_exponent_u64(dBits, d);
-  bool b0 = b && m1 == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool b0 = b && m1 == 0xFFFFFFFFFFFFFFFFULL;
   if (b0)
   {
     return skey2;
@@ -842,21 +802,12 @@ Hacl_RSAPSS_rsapss_skey_sign(
 )
 {
   KRML_CHECK_SIZE(sizeof (uint64_t),
-    (uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U
-    + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U);
+    2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U + (dBits - 1U) / 64U + 1U);
   uint64_t
-  skey[(uint32_t)2U
-  * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-  + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U
-  + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U];
+  skey[2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U + (dBits - 1U) / 64U + 1U];
   memset(skey,
     0U,
-    ((uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U
-    + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
+    (2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U + (dBits - 1U) / 64U + 1U)
     * sizeof (uint64_t));
   bool b = load_skey(modBits, eBits, dBits, nb, eb, db, skey);
   if (b)
@@ -909,20 +860,11 @@ Hacl_RSAPSS_rsapss_pkey_verify(
   uint8_t *msg
 )
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t),
-    (uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U);
-  uint64_t
-  pkey[(uint32_t)2U
-  * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-  + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U];
+  KRML_CHECK_SIZE(sizeof (uint64_t), 2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U);
+  uint64_t pkey[2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U];
   memset(pkey,
     0U,
-    ((uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    * sizeof (uint64_t));
+    (2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U) * sizeof (uint64_t));
   bool b = load_pkey(modBits, eBits, nb, eb, pkey);
   if (b)
   {
diff --git a/src/Hacl_SHA2_Vec128.c b/src/Hacl_SHA2_Vec128.c
index e1b6e304..19b56a5c 100644
--- a/src/Hacl_SHA2_Vec128.c
+++ b/src/Hacl_SHA2_Vec128.c
@@ -32,9 +32,9 @@
 static inline void sha224_init4(Lib_IntVector_Intrinsics_vec128 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
     uint32_t hi = Hacl_Impl_SHA2_Generic_h224[i];
     Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_load32(hi);
@@ -46,7 +46,7 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec128));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -55,18 +55,18 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[1U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)16U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)16U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)16U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)16U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)48U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)48U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)48U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)48U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 16U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 16U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 16U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 16U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 48U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 48U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 48U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 48U);
   Lib_IntVector_Intrinsics_vec128 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec128 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec128 v20 = ws[2U];
@@ -196,14 +196,14 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec128 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec128 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec128 b0 = hash[1U];
@@ -218,10 +218,10 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       t1 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(h02,
                 Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_lognot(e0), g0))),
             k_e_t),
@@ -229,10 +229,10 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       Lib_IntVector_Intrinsics_vec128
       t2 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, b0),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, c0),
               Lib_IntVector_Intrinsics_vec128_and(b0, c0))));
@@ -252,30 +252,30 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec128 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec128
         s1 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec128
         s0 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(s1,
                 t7),
@@ -283,9 +283,9 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
     Lib_IntVector_Intrinsics_vec128
     x = Lib_IntVector_Intrinsics_vec128_add32(hash[i], hash_old[i]);
@@ -299,17 +299,17 @@ sha224_update_nblocks4(
   Lib_IntVector_Intrinsics_vec128 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
     Hacl_Impl_SHA2_Types_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha224_update4(mb, st);
@@ -325,53 +325,53 @@ sha224_update_last4(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[512U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)64U;
+  uint8_t *last11 = last3 + 64U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
   Hacl_Impl_SHA2_Types_uint8_4p
@@ -382,7 +382,7 @@ sha224_update_last4(
   Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
   Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
   sha224_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha224_update4(last1, hash);
     return;
@@ -458,18 +458,18 @@ sha224_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * (uint32_t)16U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * 16U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)28U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 28U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 28U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 28U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 28U * sizeof (uint8_t));
 }
 
 void
@@ -491,10 +491,10 @@ Hacl_SHA2_Vec128_sha224_4(
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U };
   sha224_init4(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha224_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
@@ -512,9 +512,9 @@ Hacl_SHA2_Vec128_sha224_4(
 static inline void sha256_init4(Lib_IntVector_Intrinsics_vec128 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
     uint32_t hi = Hacl_Impl_SHA2_Generic_h256[i];
     Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_load32(hi);
@@ -526,7 +526,7 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec128));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -535,18 +535,18 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[1U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)16U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)16U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)16U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)16U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)48U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)48U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)48U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)48U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 16U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 16U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 16U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 16U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 48U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 48U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 48U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 48U);
   Lib_IntVector_Intrinsics_vec128 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec128 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec128 v20 = ws[2U];
@@ -676,14 +676,14 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec128 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec128 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec128 b0 = hash[1U];
@@ -698,10 +698,10 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       t1 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(h02,
                 Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_lognot(e0), g0))),
             k_e_t),
@@ -709,10 +709,10 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       Lib_IntVector_Intrinsics_vec128
       t2 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, b0),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, c0),
               Lib_IntVector_Intrinsics_vec128_and(b0, c0))));
@@ -732,30 +732,30 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec128 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec128
         s1 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec128
         s0 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(s1,
                 t7),
@@ -763,9 +763,9 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
     Lib_IntVector_Intrinsics_vec128
     x = Lib_IntVector_Intrinsics_vec128_add32(hash[i], hash_old[i]);
@@ -779,17 +779,17 @@ sha256_update_nblocks4(
   Lib_IntVector_Intrinsics_vec128 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
     Hacl_Impl_SHA2_Types_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha256_update4(mb, st);
@@ -805,53 +805,53 @@ sha256_update_last4(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[512U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)64U;
+  uint8_t *last11 = last3 + 64U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
   Hacl_Impl_SHA2_Types_uint8_4p
@@ -862,7 +862,7 @@ sha256_update_last4(
   Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
   Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
   sha256_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha256_update4(last1, hash);
     return;
@@ -938,18 +938,18 @@ sha256_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * (uint32_t)16U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * 16U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 32U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 32U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 32U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 32U * sizeof (uint8_t));
 }
 
 void
@@ -971,10 +971,10 @@ Hacl_SHA2_Vec128_sha256_4(
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U };
   sha256_init4(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha256_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
diff --git a/src/Hacl_SHA2_Vec256.c b/src/Hacl_SHA2_Vec256.c
index b74ce621..37d903ad 100644
--- a/src/Hacl_SHA2_Vec256.c
+++ b/src/Hacl_SHA2_Vec256.c
@@ -33,9 +33,9 @@
 static inline void sha224_init8(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     uint32_t hi = Hacl_Impl_SHA2_Generic_h224[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load32(hi);
@@ -47,7 +47,7 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = b.snd.snd.snd.snd.snd.fst;
@@ -64,14 +64,14 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[5U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5);
   ws[6U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6);
   ws[7U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + (uint32_t)32U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + (uint32_t)32U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + (uint32_t)32U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + (uint32_t)32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + 32U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + 32U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + 32U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + 32U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -281,14 +281,14 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -303,10 +303,10 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -314,10 +314,10 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -337,30 +337,30 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(s1,
                 t7),
@@ -368,9 +368,9 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add32(hash[i], hash_old[i]);
@@ -384,8 +384,8 @@ sha224_update_nblocks8(
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
     uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -395,14 +395,14 @@ sha224_update_nblocks8(
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
-    uint8_t *bl4 = b4 + i * (uint32_t)64U;
-    uint8_t *bl5 = b5 + i * (uint32_t)64U;
-    uint8_t *bl6 = b6 + i * (uint32_t)64U;
-    uint8_t *bl7 = b7 + i * (uint32_t)64U;
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
+    uint8_t *bl4 = b4 + i * 64U;
+    uint8_t *bl5 = b5 + i * 64U;
+    uint8_t *bl6 = b6 + i * 64U;
+    uint8_t *bl7 = b7 + i * 64U;
     Hacl_Impl_SHA2_Types_uint8_8p
     mb =
       {
@@ -431,18 +431,18 @@ sha224_update_last8(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -453,67 +453,67 @@ sha224_update_last8(
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
-  uint8_t *last4 = last + (uint32_t)512U;
-  uint8_t *last5 = last + (uint32_t)640U;
-  uint8_t *last6 = last + (uint32_t)768U;
-  uint8_t *last7 = last + (uint32_t)896U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
+  uint8_t *last4 = last + 512U;
+  uint8_t *last5 = last + 640U;
+  uint8_t *last6 = last + 768U;
+  uint8_t *last7 = last + 896U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last013 = last3;
-  uint8_t *last113 = last3 + (uint32_t)64U;
+  uint8_t *last113 = last3 + 64U;
   uint8_t *l30 = last013;
   uint8_t *l31 = last113;
   memcpy(last4, b4, len * sizeof (uint8_t));
-  last4[len] = (uint8_t)0x80U;
-  memcpy(last4 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last4[len] = 0x80U;
+  memcpy(last4 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last014 = last4;
-  uint8_t *last114 = last4 + (uint32_t)64U;
+  uint8_t *last114 = last4 + 64U;
   uint8_t *l40 = last014;
   uint8_t *l41 = last114;
   memcpy(last5, b5, len * sizeof (uint8_t));
-  last5[len] = (uint8_t)0x80U;
-  memcpy(last5 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last5[len] = 0x80U;
+  memcpy(last5 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last015 = last5;
-  uint8_t *last115 = last5 + (uint32_t)64U;
+  uint8_t *last115 = last5 + 64U;
   uint8_t *l50 = last015;
   uint8_t *l51 = last115;
   memcpy(last6, b6, len * sizeof (uint8_t));
-  last6[len] = (uint8_t)0x80U;
-  memcpy(last6 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last6[len] = 0x80U;
+  memcpy(last6 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last016 = last6;
-  uint8_t *last116 = last6 + (uint32_t)64U;
+  uint8_t *last116 = last6 + 64U;
   uint8_t *l60 = last016;
   uint8_t *l61 = last116;
   memcpy(last7, b7, len * sizeof (uint8_t));
-  last7[len] = (uint8_t)0x80U;
-  memcpy(last7 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last7[len] = 0x80U;
+  memcpy(last7 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last7;
-  uint8_t *last11 = last7 + (uint32_t)64U;
+  uint8_t *last11 = last7 + 64U;
   uint8_t *l70 = last01;
   uint8_t *l71 = last11;
   Hacl_Impl_SHA2_Types_uint8_8p
@@ -550,7 +550,7 @@ sha224_update_last8(
   Hacl_Impl_SHA2_Types_uint8_8p last0 = scrut.fst;
   Hacl_Impl_SHA2_Types_uint8_8p last1 = scrut.snd;
   sha224_update8(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha224_update8(last1, hash);
     return;
@@ -662,10 +662,10 @@ sha224_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   st[6U] = st6_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * 32U, st[i]););
   uint8_t *b7 = h.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = h.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = h.snd.snd.snd.snd.snd.fst;
@@ -674,14 +674,14 @@ sha224_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b4, hbuf + (uint32_t)128U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b5, hbuf + (uint32_t)160U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b6, hbuf + (uint32_t)192U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b7, hbuf + (uint32_t)224U, (uint32_t)28U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 28U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 28U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 28U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 28U * sizeof (uint8_t));
+  memcpy(b4, hbuf + 128U, 28U * sizeof (uint8_t));
+  memcpy(b5, hbuf + 160U, 28U * sizeof (uint8_t));
+  memcpy(b6, hbuf + 192U, 28U * sizeof (uint8_t));
+  memcpy(b7, hbuf + 224U, 28U * sizeof (uint8_t));
 }
 
 void
@@ -740,10 +740,10 @@ Hacl_SHA2_Vec256_sha224_8(
     };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha224_init8(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha224_update_nblocks8(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b7 = ib.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = ib.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = ib.snd.snd.snd.snd.snd.fst;
@@ -782,9 +782,9 @@ Hacl_SHA2_Vec256_sha224_8(
 static inline void sha256_init8(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     uint32_t hi = Hacl_Impl_SHA2_Generic_h256[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load32(hi);
@@ -796,7 +796,7 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = b.snd.snd.snd.snd.snd.fst;
@@ -813,14 +813,14 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[5U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5);
   ws[6U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6);
   ws[7U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + (uint32_t)32U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + (uint32_t)32U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + (uint32_t)32U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + (uint32_t)32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + 32U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + 32U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + 32U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + 32U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -1030,14 +1030,14 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -1052,10 +1052,10 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -1063,10 +1063,10 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -1086,30 +1086,30 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(s1,
                 t7),
@@ -1117,9 +1117,9 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add32(hash[i], hash_old[i]);
@@ -1133,8 +1133,8 @@ sha256_update_nblocks8(
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
     uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -1144,14 +1144,14 @@ sha256_update_nblocks8(
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
-    uint8_t *bl4 = b4 + i * (uint32_t)64U;
-    uint8_t *bl5 = b5 + i * (uint32_t)64U;
-    uint8_t *bl6 = b6 + i * (uint32_t)64U;
-    uint8_t *bl7 = b7 + i * (uint32_t)64U;
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
+    uint8_t *bl4 = b4 + i * 64U;
+    uint8_t *bl5 = b5 + i * 64U;
+    uint8_t *bl6 = b6 + i * 64U;
+    uint8_t *bl7 = b7 + i * 64U;
     Hacl_Impl_SHA2_Types_uint8_8p
     mb =
       {
@@ -1180,18 +1180,18 @@ sha256_update_last8(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -1202,67 +1202,67 @@ sha256_update_last8(
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
-  uint8_t *last4 = last + (uint32_t)512U;
-  uint8_t *last5 = last + (uint32_t)640U;
-  uint8_t *last6 = last + (uint32_t)768U;
-  uint8_t *last7 = last + (uint32_t)896U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
+  uint8_t *last4 = last + 512U;
+  uint8_t *last5 = last + 640U;
+  uint8_t *last6 = last + 768U;
+  uint8_t *last7 = last + 896U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last013 = last3;
-  uint8_t *last113 = last3 + (uint32_t)64U;
+  uint8_t *last113 = last3 + 64U;
   uint8_t *l30 = last013;
   uint8_t *l31 = last113;
   memcpy(last4, b4, len * sizeof (uint8_t));
-  last4[len] = (uint8_t)0x80U;
-  memcpy(last4 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last4[len] = 0x80U;
+  memcpy(last4 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last014 = last4;
-  uint8_t *last114 = last4 + (uint32_t)64U;
+  uint8_t *last114 = last4 + 64U;
   uint8_t *l40 = last014;
   uint8_t *l41 = last114;
   memcpy(last5, b5, len * sizeof (uint8_t));
-  last5[len] = (uint8_t)0x80U;
-  memcpy(last5 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last5[len] = 0x80U;
+  memcpy(last5 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last015 = last5;
-  uint8_t *last115 = last5 + (uint32_t)64U;
+  uint8_t *last115 = last5 + 64U;
   uint8_t *l50 = last015;
   uint8_t *l51 = last115;
   memcpy(last6, b6, len * sizeof (uint8_t));
-  last6[len] = (uint8_t)0x80U;
-  memcpy(last6 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last6[len] = 0x80U;
+  memcpy(last6 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last016 = last6;
-  uint8_t *last116 = last6 + (uint32_t)64U;
+  uint8_t *last116 = last6 + 64U;
   uint8_t *l60 = last016;
   uint8_t *l61 = last116;
   memcpy(last7, b7, len * sizeof (uint8_t));
-  last7[len] = (uint8_t)0x80U;
-  memcpy(last7 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last7[len] = 0x80U;
+  memcpy(last7 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last7;
-  uint8_t *last11 = last7 + (uint32_t)64U;
+  uint8_t *last11 = last7 + 64U;
   uint8_t *l70 = last01;
   uint8_t *l71 = last11;
   Hacl_Impl_SHA2_Types_uint8_8p
@@ -1299,7 +1299,7 @@ sha256_update_last8(
   Hacl_Impl_SHA2_Types_uint8_8p last0 = scrut.fst;
   Hacl_Impl_SHA2_Types_uint8_8p last1 = scrut.snd;
   sha256_update8(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha256_update8(last1, hash);
     return;
@@ -1411,10 +1411,10 @@ sha256_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   st[6U] = st6_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * 32U, st[i]););
   uint8_t *b7 = h.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = h.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = h.snd.snd.snd.snd.snd.fst;
@@ -1423,14 +1423,14 @@ sha256_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b4, hbuf + (uint32_t)128U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b5, hbuf + (uint32_t)160U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b6, hbuf + (uint32_t)192U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b7, hbuf + (uint32_t)224U, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 32U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 32U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 32U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 32U * sizeof (uint8_t));
+  memcpy(b4, hbuf + 128U, 32U * sizeof (uint8_t));
+  memcpy(b5, hbuf + 160U, 32U * sizeof (uint8_t));
+  memcpy(b6, hbuf + 192U, 32U * sizeof (uint8_t));
+  memcpy(b7, hbuf + 224U, 32U * sizeof (uint8_t));
 }
 
 void
@@ -1489,10 +1489,10 @@ Hacl_SHA2_Vec256_sha256_8(
     };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha256_init8(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha256_update_nblocks8(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b7 = ib.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = ib.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = ib.snd.snd.snd.snd.snd.fst;
@@ -1531,9 +1531,9 @@ Hacl_SHA2_Vec256_sha256_8(
 static inline void sha384_init4(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     uint64_t hi = Hacl_Impl_SHA2_Generic_h384[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load64(hi);
@@ -1545,7 +1545,7 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -1554,18 +1554,18 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)32U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)32U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)32U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)32U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)64U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)64U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)64U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)64U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)96U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)96U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)96U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)96U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 32U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 32U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 32U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 64U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 64U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 64U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 64U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 96U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 96U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 96U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 96U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -1679,14 +1679,14 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR5(i0,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
+    0U,
+    5U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -1701,10 +1701,10 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                    (uint32_t)14U),
+                    14U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                      (uint32_t)18U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, (uint32_t)41U)))),
+                      18U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, 41U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -1712,10 +1712,10 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-              (uint32_t)28U),
+              28U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-                (uint32_t)34U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, (uint32_t)39U))),
+                34U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, 39U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -1735,30 +1735,30 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)4U)
+    if (i0 < 4U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-              (uint32_t)19U),
+              19U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-                (uint32_t)61U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, (uint32_t)6U)));
+                61U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, 6U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-              (uint32_t)1U),
+              1U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-                (uint32_t)8U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, (uint32_t)7U)));
+                8U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, 7U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(s1,
                 t7),
@@ -1766,9 +1766,9 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add64(hash[i], hash_old[i]);
@@ -1782,17 +1782,17 @@ sha384_update_nblocks4(
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)128U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 128U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)128U;
-    uint8_t *bl1 = b1 + i * (uint32_t)128U;
-    uint8_t *bl2 = b2 + i * (uint32_t)128U;
-    uint8_t *bl3 = b3 + i * (uint32_t)128U;
+    uint8_t *bl0 = b0 + i * 128U;
+    uint8_t *bl1 = b1 + i * 128U;
+    uint8_t *bl2 = b2 + i * 128U;
+    uint8_t *bl3 = b3 + i * 128U;
     Hacl_Impl_SHA2_Types_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha384_update4(mb, st);
@@ -1808,53 +1808,53 @@ sha384_update_last4(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+  if (len + 16U + 1U <= 128U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)128U;
+  uint32_t fin = blocks * 128U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[16U] = { 0U };
-  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
   store128_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)256U;
-  uint8_t *last2 = last + (uint32_t)512U;
-  uint8_t *last3 = last + (uint32_t)768U;
+  uint8_t *last10 = last + 256U;
+  uint8_t *last2 = last + 512U;
+  uint8_t *last3 = last + 768U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)128U;
+  uint8_t *last110 = last00 + 128U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)128U;
+  uint8_t *last111 = last10 + 128U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)128U;
+  uint8_t *last112 = last2 + 128U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)128U;
+  uint8_t *last11 = last3 + 128U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
   Hacl_Impl_SHA2_Types_uint8_4p
@@ -1865,7 +1865,7 @@ sha384_update_last4(
   Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
   Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
   sha384_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha384_update4(last1, hash);
     return;
@@ -1933,18 +1933,18 @@ sha384_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * 32U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)48U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)64U, (uint32_t)48U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)128U, (uint32_t)48U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)192U, (uint32_t)48U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 48U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 64U, 48U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 128U, 48U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 192U, 48U * sizeof (uint8_t));
 }
 
 void
@@ -1966,10 +1966,10 @@ Hacl_SHA2_Vec256_sha384_4(
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha384_init4(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
   sha384_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
@@ -1987,9 +1987,9 @@ Hacl_SHA2_Vec256_sha384_4(
 static inline void sha512_init4(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     uint64_t hi = Hacl_Impl_SHA2_Generic_h512[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load64(hi);
@@ -2001,7 +2001,7 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -2010,18 +2010,18 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)32U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)32U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)32U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)32U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)64U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)64U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)64U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)64U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)96U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)96U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)96U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)96U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 32U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 32U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 32U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 64U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 64U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 64U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 64U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 96U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 96U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 96U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 96U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -2135,14 +2135,14 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR5(i0,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
+    0U,
+    5U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -2157,10 +2157,10 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                    (uint32_t)14U),
+                    14U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                      (uint32_t)18U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, (uint32_t)41U)))),
+                      18U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, 41U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -2168,10 +2168,10 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-              (uint32_t)28U),
+              28U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-                (uint32_t)34U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, (uint32_t)39U))),
+                34U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, 39U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -2191,30 +2191,30 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)4U)
+    if (i0 < 4U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-              (uint32_t)19U),
+              19U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-                (uint32_t)61U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, (uint32_t)6U)));
+                61U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, 6U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-              (uint32_t)1U),
+              1U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-                (uint32_t)8U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, (uint32_t)7U)));
+                8U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, 7U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(s1,
                 t7),
@@ -2222,9 +2222,9 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add64(hash[i], hash_old[i]);
@@ -2238,17 +2238,17 @@ sha512_update_nblocks4(
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)128U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 128U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)128U;
-    uint8_t *bl1 = b1 + i * (uint32_t)128U;
-    uint8_t *bl2 = b2 + i * (uint32_t)128U;
-    uint8_t *bl3 = b3 + i * (uint32_t)128U;
+    uint8_t *bl0 = b0 + i * 128U;
+    uint8_t *bl1 = b1 + i * 128U;
+    uint8_t *bl2 = b2 + i * 128U;
+    uint8_t *bl3 = b3 + i * 128U;
     Hacl_Impl_SHA2_Types_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha512_update4(mb, st);
@@ -2264,53 +2264,53 @@ sha512_update_last4(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+  if (len + 16U + 1U <= 128U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)128U;
+  uint32_t fin = blocks * 128U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[16U] = { 0U };
-  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
   store128_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)256U;
-  uint8_t *last2 = last + (uint32_t)512U;
-  uint8_t *last3 = last + (uint32_t)768U;
+  uint8_t *last10 = last + 256U;
+  uint8_t *last2 = last + 512U;
+  uint8_t *last3 = last + 768U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)128U;
+  uint8_t *last110 = last00 + 128U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)128U;
+  uint8_t *last111 = last10 + 128U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)128U;
+  uint8_t *last112 = last2 + 128U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)128U;
+  uint8_t *last11 = last3 + 128U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
   Hacl_Impl_SHA2_Types_uint8_4p
@@ -2321,7 +2321,7 @@ sha512_update_last4(
   Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
   Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
   sha512_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha512_update4(last1, hash);
     return;
@@ -2389,18 +2389,18 @@ sha512_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * 32U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)64U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)64U, (uint32_t)64U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)128U, (uint32_t)64U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)192U, (uint32_t)64U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 64U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 64U, 64U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 128U, 64U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 192U, 64U * sizeof (uint8_t));
 }
 
 void
@@ -2422,10 +2422,10 @@ Hacl_SHA2_Vec256_sha512_4(
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha512_init4(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
   sha512_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
diff --git a/src/Hacl_Salsa20.c b/src/Hacl_Salsa20.c
index 2758f8a4..151df07d 100644
--- a/src/Hacl_Salsa20.c
+++ b/src/Hacl_Salsa20.c
@@ -30,35 +30,35 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t sta = st[b];
   uint32_t stb0 = st[a];
   uint32_t std0 = st[d];
-  uint32_t sta1 = sta ^ ((stb0 + std0) << (uint32_t)7U | (stb0 + std0) >> (uint32_t)25U);
+  uint32_t sta1 = sta ^ ((stb0 + std0) << 7U | (stb0 + std0) >> 25U);
   st[b] = sta1;
   uint32_t sta0 = st[c];
   uint32_t stb1 = st[b];
   uint32_t std1 = st[a];
-  uint32_t sta10 = sta0 ^ ((stb1 + std1) << (uint32_t)9U | (stb1 + std1) >> (uint32_t)23U);
+  uint32_t sta10 = sta0 ^ ((stb1 + std1) << 9U | (stb1 + std1) >> 23U);
   st[c] = sta10;
   uint32_t sta2 = st[d];
   uint32_t stb2 = st[c];
   uint32_t std2 = st[b];
-  uint32_t sta11 = sta2 ^ ((stb2 + std2) << (uint32_t)13U | (stb2 + std2) >> (uint32_t)19U);
+  uint32_t sta11 = sta2 ^ ((stb2 + std2) << 13U | (stb2 + std2) >> 19U);
   st[d] = sta11;
   uint32_t sta3 = st[a];
   uint32_t stb = st[d];
   uint32_t std = st[c];
-  uint32_t sta12 = sta3 ^ ((stb + std) << (uint32_t)18U | (stb + std) >> (uint32_t)14U);
+  uint32_t sta12 = sta3 ^ ((stb + std) << 18U | (stb + std) >> 14U);
   st[a] = sta12;
 }
 
 static inline void double_round(uint32_t *st)
 {
-  quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U);
-  quarter_round(st, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U, (uint32_t)1U);
-  quarter_round(st, (uint32_t)10U, (uint32_t)14U, (uint32_t)2U, (uint32_t)6U);
-  quarter_round(st, (uint32_t)15U, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U);
-  quarter_round(st, (uint32_t)0U, (uint32_t)1U, (uint32_t)2U, (uint32_t)3U);
-  quarter_round(st, (uint32_t)5U, (uint32_t)6U, (uint32_t)7U, (uint32_t)4U);
-  quarter_round(st, (uint32_t)10U, (uint32_t)11U, (uint32_t)8U, (uint32_t)9U);
-  quarter_round(st, (uint32_t)15U, (uint32_t)12U, (uint32_t)13U, (uint32_t)14U);
+  quarter_round(st, 0U, 4U, 8U, 12U);
+  quarter_round(st, 5U, 9U, 13U, 1U);
+  quarter_round(st, 10U, 14U, 2U, 6U);
+  quarter_round(st, 15U, 3U, 7U, 11U);
+  quarter_round(st, 0U, 1U, 2U, 3U);
+  quarter_round(st, 5U, 6U, 7U, 4U);
+  quarter_round(st, 10U, 11U, 8U, 9U);
+  quarter_round(st, 15U, 12U, 13U, 14U);
 }
 
 static inline void rounds(uint32_t *st)
@@ -77,14 +77,14 @@ static inline void rounds(uint32_t *st)
 
 static inline void salsa20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t));
+  memcpy(k, ctx, 16U * sizeof (uint32_t));
   uint32_t ctr_u32 = ctr;
   k[8U] = k[8U] + ctr_u32;
   rounds(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = k;
     uint32_t x = k[i] + ctx[i];
     os[i] = x;);
@@ -98,42 +98,38 @@ static inline void salsa20_key_block0(uint8_t *out, uint8_t *key, uint8_t *n)
   uint32_t k32[8U] = { 0U };
   uint32_t n32[2U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
-  ctx[0U] = (uint32_t)0x61707865U;
+  ctx[0U] = 0x61707865U;
   uint32_t *k0 = k32;
-  uint32_t *k1 = k32 + (uint32_t)4U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t));
-  ctx[8U] = (uint32_t)0U;
-  ctx[9U] = (uint32_t)0U;
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k1, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
-  salsa20_core(k, ctx, (uint32_t)0U);
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    store32_le(out + i * (uint32_t)4U, k[i]););
+  uint32_t *k1 = k32 + 4U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 2U * sizeof (uint32_t));
+  ctx[8U] = 0U;
+  ctx[9U] = 0U;
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k1, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
+  salsa20_core(k, ctx, 0U);
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(out + i * 4U, k[i]););
 }
 
 static inline void
@@ -150,101 +146,93 @@ salsa20_encrypt(
   uint32_t k32[8U] = { 0U };
   uint32_t n32[2U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
-  ctx[0U] = (uint32_t)0x61707865U;
+  ctx[0U] = 0x61707865U;
   uint32_t *k0 = k32;
-  uint32_t *k10 = k32 + (uint32_t)4U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t));
+  uint32_t *k10 = k32 + 4U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 2U * sizeof (uint32_t));
   ctx[8U] = ctr;
-  ctx[9U] = (uint32_t)0U;
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
+  ctx[9U] = 0U;
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k10, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
   uint32_t k[16U] = { 0U };
-  KRML_HOST_IGNORE(k);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  KRML_MAYBE_UNUSED_VAR(k);
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = text + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = text + i0 * 64U;
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, i0);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = uu____1 + i * (uint32_t)4U;
+      uint8_t *bj = uu____1 + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(uu____0 + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(uu____0 + i * 4U, bl[i]););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, nb);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = plain + i * (uint32_t)4U;
+      uint8_t *bj = plain + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(plain + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(plain + i * 4U, bl[i]););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -263,101 +251,93 @@ salsa20_decrypt(
   uint32_t k32[8U] = { 0U };
   uint32_t n32[2U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
-  ctx[0U] = (uint32_t)0x61707865U;
+  ctx[0U] = 0x61707865U;
   uint32_t *k0 = k32;
-  uint32_t *k10 = k32 + (uint32_t)4U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t));
+  uint32_t *k10 = k32 + 4U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 2U * sizeof (uint32_t));
   ctx[8U] = ctr;
-  ctx[9U] = (uint32_t)0U;
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
+  ctx[9U] = 0U;
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k10, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
   uint32_t k[16U] = { 0U };
-  KRML_HOST_IGNORE(k);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  KRML_MAYBE_UNUSED_VAR(k);
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = cipher + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = cipher + i0 * 64U;
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, i0);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = uu____1 + i * (uint32_t)4U;
+      uint8_t *bj = uu____1 + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(uu____0 + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(uu____0 + i * 4U, bl[i]););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, nb);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = plain + i * (uint32_t)4U;
+      uint8_t *bj = plain + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(plain + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(plain + i * 4U, bl[i]););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -368,34 +348,34 @@ static inline void hsalsa20(uint8_t *out, uint8_t *key, uint8_t *n)
   uint32_t k32[8U] = { 0U };
   uint32_t n32[4U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   uint32_t *k0 = k32;
-  uint32_t *k1 = k32 + (uint32_t)4U;
-  ctx[0U] = (uint32_t)0x61707865U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)4U * sizeof (uint32_t));
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k1, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
+  uint32_t *k1 = k32 + 4U;
+  ctx[0U] = 0x61707865U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 4U * sizeof (uint32_t));
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k1, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
   rounds(ctx);
   uint32_t r0 = ctx[0U];
   uint32_t r1 = ctx[5U];
@@ -406,11 +386,7 @@ static inline void hsalsa20(uint8_t *out, uint8_t *key, uint8_t *n)
   uint32_t r6 = ctx[8U];
   uint32_t r7 = ctx[9U];
   uint32_t res[8U] = { r0, r1, r2, r3, r4, r5, r6, r7 };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_le(out + i * (uint32_t)4U, res[i]););
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_le(out + i * 4U, res[i]););
 }
 
 void
diff --git a/src/Hacl_Streaming_Blake2.c b/src/Hacl_Streaming_Blake2.c
index 948d56c2..ae1f3181 100644
--- a/src/Hacl_Streaming_Blake2.c
+++ b/src/Hacl_Streaming_Blake2.c
@@ -30,19 +30,19 @@
 */
 Hacl_Streaming_Blake2_blake2s_32_state *Hacl_Streaming_Blake2_blake2s_32_no_key_create_in(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
-  uint32_t *b = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
+  uint32_t *b = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
   Hacl_Streaming_Blake2_blake2s_32_block_state block_state = { .fst = wv, .snd = b };
   Hacl_Streaming_Blake2_blake2s_32_state
-  s1 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s1 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_Blake2_blake2s_32_state
   *p =
     (Hacl_Streaming_Blake2_blake2s_32_state *)KRML_HOST_MALLOC(sizeof (
         Hacl_Streaming_Blake2_blake2s_32_state
       ));
   p[0U] = s1;
-  Hacl_Blake2s_32_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_32_blake2s_init(block_state.snd, 0U, 32U);
   return p;
 }
 
@@ -54,9 +54,9 @@ void Hacl_Streaming_Blake2_blake2s_32_no_key_init(Hacl_Streaming_Blake2_blake2s_
   Hacl_Streaming_Blake2_blake2s_32_state scrut = *s1;
   uint8_t *buf = scrut.buf;
   Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state;
-  Hacl_Blake2s_32_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_32_blake2s_init(block_state.snd, 0U, 32U);
   Hacl_Streaming_Blake2_blake2s_32_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s1[0U] = tmp;
 }
 
@@ -72,33 +72,33 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_update(
 {
   Hacl_Streaming_Blake2_blake2s_32_state s1 = *p;
   uint64_t total_len = s1.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (len <= 64U - sz)
   {
     Hacl_Streaming_Blake2_blake2s_32_state s2 = *p;
     Hacl_Streaming_Blake2_blake2s_32_block_state block_state1 = s2.block_state;
     uint8_t *buf = s2.buf;
     uint64_t total_len1 = s2.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -113,46 +113,46 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Blake2_blake2s_32_state s2 = *p;
     Hacl_Streaming_Blake2_blake2s_32_block_state block_state1 = s2.block_state;
     uint8_t *buf = s2.buf;
     uint64_t total_len1 = s2.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       uint32_t *wv = block_state1.fst;
       uint32_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
+      uint32_t nb = 1U;
+      Hacl_Blake2s_32_blake2s_update_multi(64U, wv, hash, prevlen, buf, nb);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)64U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
     uint32_t *wv = block_state1.fst;
     uint32_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
+    uint32_t nb = data1_len / 64U;
     Hacl_Blake2s_32_blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
@@ -168,7 +168,7 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
+    uint32_t diff = 64U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Blake2_blake2s_32_state s2 = *p;
@@ -176,13 +176,13 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_update(
     uint8_t *buf0 = s2.buf;
     uint64_t total_len10 = s2.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -201,45 +201,39 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_update(
     uint8_t *buf = s20.buf;
     uint64_t total_len1 = s20.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       uint32_t *wv = block_state1.fst;
       uint32_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
+      uint32_t nb = 1U;
+      Hacl_Blake2s_32_blake2s_update_multi(64U, wv, hash, prevlen, buf, nb);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
     uint32_t *wv = block_state1.fst;
     uint32_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
+    uint32_t nb = data1_len / 64U;
     Hacl_Blake2s_32_blake2s_update_multi(data1_len, wv, hash, total_len1, data11, nb);
     uint8_t *dst = buf;
     memcpy(dst, data21, data2_len * sizeof (uint8_t));
@@ -270,13 +264,13 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_finish(
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t wv0[16U] = { 0U };
@@ -284,28 +278,28 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_finish(
   Hacl_Streaming_Blake2_blake2s_32_block_state tmp_block_state = { .fst = wv0, .snd = b };
   uint32_t *src_b = block_state.snd;
   uint32_t *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)16U * sizeof (uint32_t));
+  memcpy(dst_b, src_b, 16U * sizeof (uint32_t));
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
   uint32_t *wv1 = tmp_block_state.fst;
   uint32_t *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2s_32_blake2s_update_multi((uint32_t)0U, wv1, hash0, prev_len, buf_multi, nb);
+  uint32_t nb = 0U;
+  Hacl_Blake2s_32_blake2s_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   uint32_t *wv = tmp_block_state.fst;
   uint32_t *hash = tmp_block_state.snd;
   Hacl_Blake2s_32_blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last);
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, tmp_block_state.snd);
+  Hacl_Blake2s_32_blake2s_finish(32U, dst, tmp_block_state.snd);
 }
 
 /**
@@ -329,19 +323,19 @@ void Hacl_Streaming_Blake2_blake2s_32_no_key_free(Hacl_Streaming_Blake2_blake2s_
 */
 Hacl_Streaming_Blake2_blake2b_32_state *Hacl_Streaming_Blake2_blake2b_32_no_key_create_in(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
-  uint64_t *b = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
+  uint64_t *b = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
   Hacl_Streaming_Blake2_blake2b_32_block_state block_state = { .fst = wv, .snd = b };
   Hacl_Streaming_Blake2_blake2b_32_state
-  s1 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s1 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_Blake2_blake2b_32_state
   *p =
     (Hacl_Streaming_Blake2_blake2b_32_state *)KRML_HOST_MALLOC(sizeof (
         Hacl_Streaming_Blake2_blake2b_32_state
       ));
   p[0U] = s1;
-  Hacl_Blake2b_32_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_32_blake2b_init(block_state.snd, 0U, 64U);
   return p;
 }
 
@@ -353,9 +347,9 @@ void Hacl_Streaming_Blake2_blake2b_32_no_key_init(Hacl_Streaming_Blake2_blake2b_
   Hacl_Streaming_Blake2_blake2b_32_state scrut = *s1;
   uint8_t *buf = scrut.buf;
   Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state;
-  Hacl_Blake2b_32_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_32_blake2b_init(block_state.snd, 0U, 64U);
   Hacl_Streaming_Blake2_blake2b_32_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s1[0U] = tmp;
 }
 
@@ -371,33 +365,33 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
 {
   Hacl_Streaming_Blake2_blake2b_32_state s1 = *p;
   uint64_t total_len = s1.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)128U;
+    sz = 128U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    sz = (uint32_t)(total_len % (uint64_t)128U);
   }
-  if (len <= (uint32_t)128U - sz)
+  if (len <= 128U - sz)
   {
     Hacl_Streaming_Blake2_blake2b_32_state s2 = *p;
     Hacl_Streaming_Blake2_blake2b_32_block_state block_state1 = s2.block_state;
     uint8_t *buf = s2.buf;
     uint64_t total_len1 = s2.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -412,28 +406,28 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Blake2_blake2b_32_state s2 = *p;
     Hacl_Streaming_Blake2_blake2b_32_block_state block_state1 = s2.block_state;
     uint8_t *buf = s2.buf;
     uint64_t total_len1 = s2.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       uint64_t *wv = block_state1.fst;
       uint64_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
+      uint32_t nb = 1U;
+      Hacl_Blake2b_32_blake2b_update_multi(128U,
         wv,
         hash,
         FStar_UInt128_uint64_to_uint128(prevlen),
@@ -441,22 +435,22 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
         nb);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)128U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
+    uint32_t n_blocks = (len - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
     uint64_t *wv = block_state1.fst;
     uint64_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
+    uint32_t nb = data1_len / 128U;
     Hacl_Blake2b_32_blake2b_update_multi(data1_len,
       wv,
       hash,
@@ -477,7 +471,7 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)128U - sz;
+    uint32_t diff = 128U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Blake2_blake2b_32_state s2 = *p;
@@ -485,13 +479,13 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
     uint8_t *buf0 = s2.buf;
     uint64_t total_len10 = s2.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)128U;
+      sz10 = 128U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -510,21 +504,21 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
     uint8_t *buf = s20.buf;
     uint64_t total_len1 = s20.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       uint64_t *wv = block_state1.fst;
       uint64_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
+      uint32_t nb = 1U;
+      Hacl_Blake2b_32_blake2b_update_multi(128U,
         wv,
         hash,
         FStar_UInt128_uint64_to_uint128(prevlen),
@@ -532,28 +526,22 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
         nb);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)128U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
+    uint32_t n_blocks = (len - diff - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
     uint64_t *wv = block_state1.fst;
     uint64_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
+    uint32_t nb = data1_len / 128U;
     Hacl_Blake2b_32_blake2b_update_multi(data1_len,
       wv,
       hash,
@@ -589,13 +577,13 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_finish(
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)128U;
+    r = 128U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    r = (uint32_t)(total_len % (uint64_t)128U);
   }
   uint8_t *buf_1 = buf_;
   uint64_t wv0[16U] = { 0U };
@@ -603,23 +591,23 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_finish(
   Hacl_Streaming_Blake2_blake2b_32_block_state tmp_block_state = { .fst = wv0, .snd = b };
   uint64_t *src_b = block_state.snd;
   uint64_t *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)16U * sizeof (uint64_t));
+  memcpy(dst_b, src_b, 16U * sizeof (uint64_t));
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 128U == 0U && r > 0U)
   {
-    ite = (uint32_t)128U;
+    ite = 128U;
   }
   else
   {
-    ite = r % (uint32_t)128U;
+    ite = r % 128U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
   uint64_t *wv1 = tmp_block_state.fst;
   uint64_t *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2b_32_blake2b_update_multi((uint32_t)0U,
+  uint32_t nb = 0U;
+  Hacl_Blake2b_32_blake2b_update_multi(0U,
     wv1,
     hash0,
     FStar_UInt128_uint64_to_uint128(prev_len),
@@ -634,7 +622,7 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_finish(
     FStar_UInt128_uint64_to_uint128(prev_len_last),
     r,
     buf_last);
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, tmp_block_state.snd);
+  Hacl_Blake2b_32_blake2b_finish(64U, dst, tmp_block_state.snd);
 }
 
 /**
diff --git a/src/Hacl_Streaming_Blake2b_256.c b/src/Hacl_Streaming_Blake2b_256.c
index bdb5433f..fee698bf 100644
--- a/src/Hacl_Streaming_Blake2b_256.c
+++ b/src/Hacl_Streaming_Blake2b_256.c
@@ -31,27 +31,27 @@
 Hacl_Streaming_Blake2b_256_blake2b_256_state
 *Hacl_Streaming_Blake2b_256_blake2b_256_no_key_create_in(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
   Lib_IntVector_Intrinsics_vec256
   *wv =
     (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U);
-  memset(wv, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
+  memset(wv, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
   Lib_IntVector_Intrinsics_vec256
   *b =
     (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U);
-  memset(b, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
+  memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
   Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = { .fst = wv, .snd = b };
   Hacl_Streaming_Blake2b_256_blake2b_256_state
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_Blake2b_256_blake2b_256_state
   *p =
     (Hacl_Streaming_Blake2b_256_blake2b_256_state *)KRML_HOST_MALLOC(sizeof (
         Hacl_Streaming_Blake2b_256_blake2b_256_state
       ));
   p[0U] = s;
-  Hacl_Blake2b_256_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_256_blake2b_init(block_state.snd, 0U, 64U);
   return p;
 }
 
@@ -66,9 +66,9 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_init(
   Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *s;
   uint8_t *buf = scrut.buf;
   Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state;
-  Hacl_Blake2b_256_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_256_blake2b_init(block_state.snd, 0U, 64U);
   Hacl_Streaming_Blake2b_256_blake2b_256_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -84,33 +84,33 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
 {
   Hacl_Streaming_Blake2b_256_blake2b_256_state s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)128U;
+    sz = 128U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    sz = (uint32_t)(total_len % (uint64_t)128U);
   }
-  if (len <= (uint32_t)128U - sz)
+  if (len <= 128U - sz)
   {
     Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p;
     Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -125,28 +125,28 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p;
     Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
       Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
+      uint32_t nb = 1U;
+      Hacl_Blake2b_256_blake2b_update_multi(128U,
         wv,
         hash,
         FStar_UInt128_uint64_to_uint128(prevlen),
@@ -154,22 +154,22 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
         nb);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)128U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
+    uint32_t n_blocks = (len - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
     Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
     Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
+    uint32_t nb = data1_len / 128U;
     Hacl_Blake2b_256_blake2b_update_multi(data1_len,
       wv,
       hash,
@@ -190,7 +190,7 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)128U - sz;
+    uint32_t diff = 128U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p;
@@ -198,13 +198,13 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)128U;
+      sz10 = 128U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -223,21 +223,21 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
       Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
+      uint32_t nb = 1U;
+      Hacl_Blake2b_256_blake2b_update_multi(128U,
         wv,
         hash,
         FStar_UInt128_uint64_to_uint128(prevlen),
@@ -245,28 +245,22 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
         nb);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)128U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
+    uint32_t n_blocks = (len - diff - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
     Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
     Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
+    uint32_t nb = data1_len / 128U;
     Hacl_Blake2b_256_blake2b_update_multi(data1_len,
       wv,
       hash,
@@ -302,13 +296,13 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)128U;
+    r = 128U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    r = (uint32_t)(total_len % (uint64_t)128U);
   }
   uint8_t *buf_1 = buf_;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
@@ -316,23 +310,23 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(
   Hacl_Streaming_Blake2b_256_blake2b_256_block_state tmp_block_state = { .fst = wv0, .snd = b };
   Lib_IntVector_Intrinsics_vec256 *src_b = block_state.snd;
   Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 128U == 0U && r > 0U)
   {
-    ite = (uint32_t)128U;
+    ite = 128U;
   }
   else
   {
-    ite = r % (uint32_t)128U;
+    ite = r % 128U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
   Lib_IntVector_Intrinsics_vec256 *wv1 = tmp_block_state.fst;
   Lib_IntVector_Intrinsics_vec256 *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2b_256_blake2b_update_multi((uint32_t)0U,
+  uint32_t nb = 0U;
+  Hacl_Blake2b_256_blake2b_update_multi(0U,
     wv1,
     hash0,
     FStar_UInt128_uint64_to_uint128(prev_len),
@@ -347,7 +341,7 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(
     FStar_UInt128_uint64_to_uint128(prev_len_last),
     r,
     buf_last);
-  Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, tmp_block_state.snd);
+  Hacl_Blake2b_256_blake2b_finish(64U, dst, tmp_block_state.snd);
 }
 
 /**
diff --git a/src/Hacl_Streaming_Blake2s_128.c b/src/Hacl_Streaming_Blake2s_128.c
index f97bf5d0..03bc4e13 100644
--- a/src/Hacl_Streaming_Blake2s_128.c
+++ b/src/Hacl_Streaming_Blake2s_128.c
@@ -31,27 +31,27 @@
 Hacl_Streaming_Blake2s_128_blake2s_128_state
 *Hacl_Streaming_Blake2s_128_blake2s_128_no_key_create_in(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
   Lib_IntVector_Intrinsics_vec128
   *wv =
     (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U);
-  memset(wv, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(wv, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
   Lib_IntVector_Intrinsics_vec128
   *b =
     (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U);
-  memset(b, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
   Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = { .fst = wv, .snd = b };
   Hacl_Streaming_Blake2s_128_blake2s_128_state
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_Blake2s_128_blake2s_128_state
   *p =
     (Hacl_Streaming_Blake2s_128_blake2s_128_state *)KRML_HOST_MALLOC(sizeof (
         Hacl_Streaming_Blake2s_128_blake2s_128_state
       ));
   p[0U] = s;
-  Hacl_Blake2s_128_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_128_blake2s_init(block_state.snd, 0U, 32U);
   return p;
 }
 
@@ -66,9 +66,9 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_init(
   Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *s;
   uint8_t *buf = scrut.buf;
   Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state;
-  Hacl_Blake2s_128_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_128_blake2s_init(block_state.snd, 0U, 32U);
   Hacl_Streaming_Blake2s_128_blake2s_128_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -84,33 +84,33 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
 {
   Hacl_Streaming_Blake2s_128_blake2s_128_state s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (len <= 64U - sz)
   {
     Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p;
     Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -125,46 +125,46 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p;
     Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
       Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
+      uint32_t nb = 1U;
+      Hacl_Blake2s_128_blake2s_update_multi(64U, wv, hash, prevlen, buf, nb);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)64U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
     Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
     Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
+    uint32_t nb = data1_len / 64U;
     Hacl_Blake2s_128_blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
@@ -180,7 +180,7 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
+    uint32_t diff = 64U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p;
@@ -188,13 +188,13 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -213,45 +213,39 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
       Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
+      uint32_t nb = 1U;
+      Hacl_Blake2s_128_blake2s_update_multi(64U, wv, hash, prevlen, buf, nb);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
     Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
     Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
+    uint32_t nb = data1_len / 64U;
     Hacl_Blake2s_128_blake2s_update_multi(data1_len, wv, hash, total_len1, data11, nb);
     uint8_t *dst = buf;
     memcpy(dst, data21, data2_len * sizeof (uint8_t));
@@ -282,13 +276,13 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish(
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
@@ -296,28 +290,28 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish(
   Hacl_Streaming_Blake2s_128_blake2s_128_block_state tmp_block_state = { .fst = wv0, .snd = b };
   Lib_IntVector_Intrinsics_vec128 *src_b = block_state.snd;
   Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
   Lib_IntVector_Intrinsics_vec128 *wv1 = tmp_block_state.fst;
   Lib_IntVector_Intrinsics_vec128 *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2s_128_blake2s_update_multi((uint32_t)0U, wv1, hash0, prev_len, buf_multi, nb);
+  uint32_t nb = 0U;
+  Hacl_Blake2s_128_blake2s_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Lib_IntVector_Intrinsics_vec128 *wv = tmp_block_state.fst;
   Lib_IntVector_Intrinsics_vec128 *hash = tmp_block_state.snd;
   Hacl_Blake2s_128_blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last);
-  Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, tmp_block_state.snd);
+  Hacl_Blake2s_128_blake2s_finish(32U, dst, tmp_block_state.snd);
 }
 
 /**
diff --git a/src/Hacl_Streaming_Poly1305_128.c b/src/Hacl_Streaming_Poly1305_128.c
index c3f7c19a..e8275b99 100644
--- a/src/Hacl_Streaming_Poly1305_128.c
+++ b/src/Hacl_Streaming_Poly1305_128.c
@@ -28,19 +28,18 @@
 Hacl_Streaming_Poly1305_128_poly1305_128_state
 *Hacl_Streaming_Poly1305_128_create_in(uint8_t *k)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
   Lib_IntVector_Intrinsics_vec128
   *r1 =
     (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)25U);
-  memset(r1, 0U, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128));
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 25U);
+  memset(r1, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec128));
   Lib_IntVector_Intrinsics_vec128 *block_state = r1;
-  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
+  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  memcpy(k_, k, 32U * sizeof (uint8_t));
   uint8_t *k_0 = k_;
   Hacl_Streaming_Poly1305_128_poly1305_128_state
-  s =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_0 };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_0 };
   Hacl_Streaming_Poly1305_128_poly1305_128_state
   *p =
     (Hacl_Streaming_Poly1305_128_poly1305_128_state *)KRML_HOST_MALLOC(sizeof (
@@ -59,11 +58,10 @@ Hacl_Streaming_Poly1305_128_init(uint8_t *k, Hacl_Streaming_Poly1305_128_poly130
   uint8_t *buf = scrut.buf;
   Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
   Hacl_Poly1305_128_poly1305_init(block_state, k);
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(k_, k, 32U * sizeof (uint8_t));
   uint8_t *k_1 = k_;
   Hacl_Streaming_Poly1305_128_poly1305_128_state
-  tmp =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_1 };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 };
   s[0U] = tmp;
 }
 
@@ -79,20 +77,20 @@ Hacl_Streaming_Poly1305_128_update(
 {
   Hacl_Streaming_Poly1305_128_poly1305_128_state s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)32U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)32U;
+    sz = 32U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)32U);
+    sz = (uint32_t)(total_len % (uint64_t)32U);
   }
-  if (len <= (uint32_t)32U - sz)
+  if (len <= 32U - sz)
   {
     Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p;
     Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state;
@@ -100,13 +98,13 @@ Hacl_Streaming_Poly1305_128_update(
     uint64_t total_len1 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)32U;
+      sz1 = 32U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)32U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -122,7 +120,7 @@ Hacl_Streaming_Poly1305_128_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p;
     Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state;
@@ -130,29 +128,29 @@ Hacl_Streaming_Poly1305_128_update(
     uint64_t total_len1 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)32U;
+      sz1 = 32U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)32U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Poly1305_128_poly1305_update(block_state1, (uint32_t)32U, buf);
+      Hacl_Poly1305_128_poly1305_update(block_state1, 32U, buf);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)32U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)32U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)32U;
+      ite = 32U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)32U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)32U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)32U;
-    uint32_t data1_len = n_blocks * (uint32_t)32U;
+    uint32_t n_blocks = (len - ite) / 32U;
+    uint32_t data1_len = n_blocks * 32U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
@@ -172,7 +170,7 @@ Hacl_Streaming_Poly1305_128_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)32U - sz;
+    uint32_t diff = 32U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p;
@@ -181,13 +179,13 @@ Hacl_Streaming_Poly1305_128_update(
     uint64_t total_len10 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)32U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)32U;
+      sz10 = 32U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)32U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)32U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -208,35 +206,29 @@ Hacl_Streaming_Poly1305_128_update(
     uint64_t total_len1 = s10.total_len;
     uint8_t *k_10 = s10.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)32U;
+      sz1 = 32U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)32U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Poly1305_128_poly1305_update(block_state1, (uint32_t)32U, buf);
+      Hacl_Poly1305_128_poly1305_update(block_state1, 32U, buf);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)32U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)32U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)32U;
+      ite = 32U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)32U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)32U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)32U;
-    uint32_t data1_len = n_blocks * (uint32_t)32U;
+    uint32_t n_blocks = (len - diff - ite) / 32U;
+    uint32_t data1_len = n_blocks * 32U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
@@ -269,61 +261,51 @@ Hacl_Streaming_Poly1305_128_finish(
   uint64_t total_len = scrut.total_len;
   uint8_t *k_ = scrut.p_key;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)32U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)32U;
+    r = 32U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)32U);
+    r = (uint32_t)(total_len % (uint64_t)32U);
   }
   uint8_t *buf_1 = buf_;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 r1[25U] KRML_POST_ALIGN(16) = { 0U };
   Lib_IntVector_Intrinsics_vec128 *tmp_block_state = r1;
-  memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(tmp_block_state, block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec128));
   uint32_t ite0;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite0 = (uint32_t)16U;
+    ite0 = 16U;
   }
   else
   {
-    ite0 = r % (uint32_t)16U;
+    ite0 = r % 16U;
   }
   uint8_t *buf_last = buf_1 + r - ite0;
   uint8_t *buf_multi = buf_1;
   uint32_t ite;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite = (uint32_t)16U;
+    ite = 16U;
   }
   else
   {
-    ite = r % (uint32_t)16U;
+    ite = r % 16U;
   }
   Hacl_Poly1305_128_poly1305_update(tmp_block_state, r - ite, buf_multi);
   uint32_t ite1;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite1 = (uint32_t)16U;
+    ite1 = 16U;
   }
   else
   {
-    ite1 = r % (uint32_t)16U;
+    ite1 = r % 16U;
   }
-  KRML_HOST_IGNORE(total_len - (uint64_t)ite1);
-  uint32_t ite2;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite2 = (uint32_t)16U;
-  }
-  else
-  {
-    ite2 = r % (uint32_t)16U;
-  }
-  Hacl_Poly1305_128_poly1305_update(tmp_block_state, ite2, buf_last);
+  Hacl_Poly1305_128_poly1305_update(tmp_block_state, ite1, buf_last);
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 tmp[25U] KRML_POST_ALIGN(16) = { 0U };
-  memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(tmp, tmp_block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec128));
   Hacl_Poly1305_128_poly1305_finish(dst, k_, tmp);
 }
 
diff --git a/src/Hacl_Streaming_Poly1305_256.c b/src/Hacl_Streaming_Poly1305_256.c
index e56275a4..ff769af9 100644
--- a/src/Hacl_Streaming_Poly1305_256.c
+++ b/src/Hacl_Streaming_Poly1305_256.c
@@ -28,19 +28,18 @@
 Hacl_Streaming_Poly1305_256_poly1305_256_state
 *Hacl_Streaming_Poly1305_256_create_in(uint8_t *k)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
   Lib_IntVector_Intrinsics_vec256
   *r1 =
     (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)25U);
-  memset(r1, 0U, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 25U);
+  memset(r1, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
   Lib_IntVector_Intrinsics_vec256 *block_state = r1;
-  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
+  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  memcpy(k_, k, 32U * sizeof (uint8_t));
   uint8_t *k_0 = k_;
   Hacl_Streaming_Poly1305_256_poly1305_256_state
-  s =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_0 };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_0 };
   Hacl_Streaming_Poly1305_256_poly1305_256_state
   *p =
     (Hacl_Streaming_Poly1305_256_poly1305_256_state *)KRML_HOST_MALLOC(sizeof (
@@ -59,11 +58,10 @@ Hacl_Streaming_Poly1305_256_init(uint8_t *k, Hacl_Streaming_Poly1305_256_poly130
   uint8_t *buf = scrut.buf;
   Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
   Hacl_Poly1305_256_poly1305_init(block_state, k);
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(k_, k, 32U * sizeof (uint8_t));
   uint8_t *k_1 = k_;
   Hacl_Streaming_Poly1305_256_poly1305_256_state
-  tmp =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_1 };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 };
   s[0U] = tmp;
 }
 
@@ -79,20 +77,20 @@ Hacl_Streaming_Poly1305_256_update(
 {
   Hacl_Streaming_Poly1305_256_poly1305_256_state s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (len <= 64U - sz)
   {
     Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p;
     Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state;
@@ -100,13 +98,13 @@ Hacl_Streaming_Poly1305_256_update(
     uint64_t total_len1 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -122,7 +120,7 @@ Hacl_Streaming_Poly1305_256_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p;
     Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state;
@@ -130,29 +128,29 @@ Hacl_Streaming_Poly1305_256_update(
     uint64_t total_len1 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Poly1305_256_poly1305_update(block_state1, (uint32_t)64U, buf);
+      Hacl_Poly1305_256_poly1305_update(block_state1, 64U, buf);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)64U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
@@ -172,7 +170,7 @@ Hacl_Streaming_Poly1305_256_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
+    uint32_t diff = 64U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p;
@@ -181,13 +179,13 @@ Hacl_Streaming_Poly1305_256_update(
     uint64_t total_len10 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -208,35 +206,29 @@ Hacl_Streaming_Poly1305_256_update(
     uint64_t total_len1 = s10.total_len;
     uint8_t *k_10 = s10.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Poly1305_256_poly1305_update(block_state1, (uint32_t)64U, buf);
+      Hacl_Poly1305_256_poly1305_update(block_state1, 64U, buf);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
@@ -269,61 +261,51 @@ Hacl_Streaming_Poly1305_256_finish(
   uint64_t total_len = scrut.total_len;
   uint8_t *k_ = scrut.p_key;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 r1[25U] KRML_POST_ALIGN(32) = { 0U };
   Lib_IntVector_Intrinsics_vec256 *tmp_block_state = r1;
-  memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(tmp_block_state, block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint32_t ite0;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite0 = (uint32_t)16U;
+    ite0 = 16U;
   }
   else
   {
-    ite0 = r % (uint32_t)16U;
+    ite0 = r % 16U;
   }
   uint8_t *buf_last = buf_1 + r - ite0;
   uint8_t *buf_multi = buf_1;
   uint32_t ite;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite = (uint32_t)16U;
+    ite = 16U;
   }
   else
   {
-    ite = r % (uint32_t)16U;
+    ite = r % 16U;
   }
   Hacl_Poly1305_256_poly1305_update(tmp_block_state, r - ite, buf_multi);
   uint32_t ite1;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite1 = (uint32_t)16U;
+    ite1 = 16U;
   }
   else
   {
-    ite1 = r % (uint32_t)16U;
+    ite1 = r % 16U;
   }
-  KRML_HOST_IGNORE(total_len - (uint64_t)ite1);
-  uint32_t ite2;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite2 = (uint32_t)16U;
-  }
-  else
-  {
-    ite2 = r % (uint32_t)16U;
-  }
-  Hacl_Poly1305_256_poly1305_update(tmp_block_state, ite2, buf_last);
+  Hacl_Poly1305_256_poly1305_update(tmp_block_state, ite1, buf_last);
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 tmp[25U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(tmp, tmp_block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
   Hacl_Poly1305_256_poly1305_finish(dst, k_, tmp);
 }
 
diff --git a/src/Hacl_Streaming_Poly1305_32.c b/src/Hacl_Streaming_Poly1305_32.c
index 249a622f..b1eb12b2 100644
--- a/src/Hacl_Streaming_Poly1305_32.c
+++ b/src/Hacl_Streaming_Poly1305_32.c
@@ -27,15 +27,14 @@
 
 Hacl_Streaming_Poly1305_32_poly1305_32_state *Hacl_Streaming_Poly1305_32_create_in(uint8_t *k)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint8_t));
-  uint64_t *r1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t));
+  uint64_t *r1 = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
   uint64_t *block_state = r1;
-  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
+  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  memcpy(k_, k, 32U * sizeof (uint8_t));
   uint8_t *k_0 = k_;
   Hacl_Streaming_Poly1305_32_poly1305_32_state
-  s =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_0 };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_0 };
   Hacl_Streaming_Poly1305_32_poly1305_32_state
   *p =
     (Hacl_Streaming_Poly1305_32_poly1305_32_state *)KRML_HOST_MALLOC(sizeof (
@@ -54,11 +53,10 @@ Hacl_Streaming_Poly1305_32_init(uint8_t *k, Hacl_Streaming_Poly1305_32_poly1305_
   uint8_t *buf = scrut.buf;
   uint64_t *block_state = scrut.block_state;
   Hacl_Poly1305_32_poly1305_init(block_state, k);
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(k_, k, 32U * sizeof (uint8_t));
   uint8_t *k_1 = k_;
   Hacl_Streaming_Poly1305_32_poly1305_32_state
-  tmp =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_1 };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 };
   s[0U] = tmp;
 }
 
@@ -74,20 +72,20 @@ Hacl_Streaming_Poly1305_32_update(
 {
   Hacl_Streaming_Poly1305_32_poly1305_32_state s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)16U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)16U;
+    sz = 16U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)16U);
+    sz = (uint32_t)(total_len % (uint64_t)16U);
   }
-  if (len <= (uint32_t)16U - sz)
+  if (len <= 16U - sz)
   {
     Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p;
     uint64_t *block_state1 = s1.block_state;
@@ -95,13 +93,13 @@ Hacl_Streaming_Poly1305_32_update(
     uint64_t total_len1 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)16U;
+      sz1 = 16U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)16U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -117,7 +115,7 @@ Hacl_Streaming_Poly1305_32_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p;
     uint64_t *block_state1 = s1.block_state;
@@ -125,29 +123,29 @@ Hacl_Streaming_Poly1305_32_update(
     uint64_t total_len1 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)16U;
+      sz1 = 16U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)16U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Poly1305_32_poly1305_update(block_state1, (uint32_t)16U, buf);
+      Hacl_Poly1305_32_poly1305_update(block_state1, 16U, buf);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)16U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)16U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)16U;
+      ite = 16U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)16U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)16U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)16U;
-    uint32_t data1_len = n_blocks * (uint32_t)16U;
+    uint32_t n_blocks = (len - ite) / 16U;
+    uint32_t data1_len = n_blocks * 16U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
@@ -167,7 +165,7 @@ Hacl_Streaming_Poly1305_32_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)16U - sz;
+    uint32_t diff = 16U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p;
@@ -176,13 +174,13 @@ Hacl_Streaming_Poly1305_32_update(
     uint64_t total_len10 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)16U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)16U;
+      sz10 = 16U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)16U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)16U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -203,35 +201,29 @@ Hacl_Streaming_Poly1305_32_update(
     uint64_t total_len1 = s10.total_len;
     uint8_t *k_10 = s10.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)16U;
+      sz1 = 16U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)16U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Poly1305_32_poly1305_update(block_state1, (uint32_t)16U, buf);
+      Hacl_Poly1305_32_poly1305_update(block_state1, 16U, buf);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)16U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)16U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)16U;
+      ite = 16U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)16U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)16U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)16U;
-    uint32_t data1_len = n_blocks * (uint32_t)16U;
+    uint32_t n_blocks = (len - diff - ite) / 16U;
+    uint32_t data1_len = n_blocks * 16U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
@@ -264,33 +256,33 @@ Hacl_Streaming_Poly1305_32_finish(
   uint64_t total_len = scrut.total_len;
   uint8_t *k_ = scrut.p_key;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)16U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)16U;
+    r = 16U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)16U);
+    r = (uint32_t)(total_len % (uint64_t)16U);
   }
   uint8_t *buf_1 = buf_;
   uint64_t r1[25U] = { 0U };
   uint64_t *tmp_block_state = r1;
-  memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (uint64_t));
+  memcpy(tmp_block_state, block_state, 25U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite = (uint32_t)16U;
+    ite = 16U;
   }
   else
   {
-    ite = r % (uint32_t)16U;
+    ite = r % 16U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_Poly1305_32_poly1305_update(tmp_block_state, (uint32_t)0U, buf_multi);
+  Hacl_Poly1305_32_poly1305_update(tmp_block_state, 0U, buf_multi);
   Hacl_Poly1305_32_poly1305_update(tmp_block_state, r, buf_last);
   uint64_t tmp[25U] = { 0U };
-  memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (uint64_t));
+  memcpy(tmp, tmp_block_state, 25U * sizeof (uint64_t));
   Hacl_Poly1305_32_poly1305_finish(dst, k_, tmp);
 }
 
diff --git a/src/msvc/EverCrypt_AEAD.c b/src/msvc/EverCrypt_AEAD.c
index d3a4ffbe..b0fb4826 100644
--- a/src/msvc/EverCrypt_AEAD.c
+++ b/src/msvc/EverCrypt_AEAD.c
@@ -46,8 +46,8 @@ The state may be reused as many times as desired.
 */
 bool EverCrypt_AEAD_uu___is_Ek(Spec_Agile_AEAD_alg a, EverCrypt_AEAD_state_s projectee)
 {
-  KRML_HOST_IGNORE(a);
-  KRML_HOST_IGNORE(projectee);
+  KRML_MAYBE_UNUSED_VAR(a);
+  KRML_MAYBE_UNUSED_VAR(projectee);
   return true;
 }
 
@@ -86,11 +86,11 @@ Spec_Agile_AEAD_alg EverCrypt_AEAD_alg_of_state(EverCrypt_AEAD_state_s *s)
 static EverCrypt_Error_error_code
 create_in_chacha20_poly1305(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 {
-  uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+  uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
   EverCrypt_AEAD_state_s
   *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s));
   p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek });
-  memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(ek, k, 32U * sizeof (uint8_t));
   dst[0U] = p;
   return EverCrypt_Error_Success;
 }
@@ -98,8 +98,8 @@ create_in_chacha20_poly1305(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 static EverCrypt_Error_error_code
 create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 {
-  KRML_HOST_IGNORE(dst);
-  KRML_HOST_IGNORE(k);
+  KRML_MAYBE_UNUSED_VAR(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
   #if HACL_CAN_COMPILE_VALE
   bool has_aesni = EverCrypt_AutoConfig2_has_aesni();
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
@@ -108,11 +108,11 @@ create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
   bool has_movbe = EverCrypt_AutoConfig2_has_movbe();
   if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe)
   {
-    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)480U, sizeof (uint8_t));
+    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC(480U, sizeof (uint8_t));
     uint8_t *keys_b = ek;
-    uint8_t *hkeys_b = ek + (uint32_t)176U;
-    KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b));
-    KRML_HOST_IGNORE(aes128_keyhash_init(keys_b, hkeys_b));
+    uint8_t *hkeys_b = ek + 176U;
+    aes128_key_expansion(k, keys_b);
+    aes128_keyhash_init(keys_b, hkeys_b);
     EverCrypt_AEAD_state_s
     *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s));
     p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek });
@@ -128,8 +128,8 @@ create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 static EverCrypt_Error_error_code
 create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 {
-  KRML_HOST_IGNORE(dst);
-  KRML_HOST_IGNORE(k);
+  KRML_MAYBE_UNUSED_VAR(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
   #if HACL_CAN_COMPILE_VALE
   bool has_aesni = EverCrypt_AutoConfig2_has_aesni();
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
@@ -138,11 +138,11 @@ create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
   bool has_movbe = EverCrypt_AutoConfig2_has_movbe();
   if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe)
   {
-    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)544U, sizeof (uint8_t));
+    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC(544U, sizeof (uint8_t));
     uint8_t *keys_b = ek;
-    uint8_t *hkeys_b = ek + (uint32_t)240U;
-    KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b));
-    KRML_HOST_IGNORE(aes256_keyhash_init(keys_b, hkeys_b));
+    uint8_t *hkeys_b = ek + 240U;
+    aes256_key_expansion(k, keys_b);
+    aes256_keyhash_init(keys_b, hkeys_b);
     EverCrypt_AEAD_state_s
     *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s));
     p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek });
@@ -208,115 +208,106 @@ encrypt_aes128_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)304U;
+  uint8_t *scratch_b = ek + 304U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+  uint8_t *hkeys_b = ek1 + 176U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *plain_b_ = plain;
   uint8_t *out_b_ = cipher;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    plain + plain_len_,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+  memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+  if (len128x6 / 16ULL >= 18ULL)
   {
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    gcm128_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
-    KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
+    gcm128_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
-  memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
   return EverCrypt_Error_Success;
   #else
   KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n",
@@ -340,115 +331,106 @@ encrypt_aes256_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)368U;
+  uint8_t *scratch_b = ek + 368U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+  uint8_t *hkeys_b = ek1 + 240U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *plain_b_ = plain;
   uint8_t *out_b_ = cipher;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    plain + plain_len_,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+  memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+  if (len128x6 / 16ULL >= 18ULL)
   {
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    gcm256_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
-    KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
+    gcm256_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
-  memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
   return EverCrypt_Error_Success;
   #else
   KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n",
@@ -510,7 +492,7 @@ EverCrypt_AEAD_encrypt(
       }
     case Spec_Cipher_Expansion_Hacl_CHACHA20:
       {
-        if (iv_len != (uint32_t)12U)
+        if (iv_len != 12U)
         {
           return EverCrypt_Error_InvalidIVLength;
         }
@@ -546,124 +528,115 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[480U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-  KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 176U;
+  aes128_key_expansion(k, keys_b0);
+  aes128_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
   }
-  else if (iv_len == (uint32_t)0U)
+  else if (iv_len == 0U)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
   }
   else
   {
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)304U;
+    uint8_t *scratch_b = ek0 + 304U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+    uint8_t *hkeys_b = ek1 + 176U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *plain_b_ = plain;
     uint8_t *out_b_ = cipher;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      plain + plain_len_,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+    memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+    if (len128x6 / 16ULL >= 18ULL)
     {
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      gcm128_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
-      KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
+      gcm128_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
-    memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
     KRML_HOST_IGNORE(EverCrypt_Error_Success);
   }
   return EverCrypt_Error_Success;
@@ -697,124 +670,115 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[544U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-  KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 240U;
+  aes256_key_expansion(k, keys_b0);
+  aes256_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
   }
-  else if (iv_len == (uint32_t)0U)
+  else if (iv_len == 0U)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
   }
   else
   {
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)368U;
+    uint8_t *scratch_b = ek0 + 368U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+    uint8_t *hkeys_b = ek1 + 240U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *plain_b_ = plain;
     uint8_t *out_b_ = cipher;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      plain + plain_len_,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+    memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+    if (len128x6 / 16ULL >= 18ULL)
     {
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      gcm256_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
-      KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
+      gcm256_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
-    memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
     KRML_HOST_IGNORE(EverCrypt_Error_Success);
   }
   return EverCrypt_Error_Success;
@@ -840,15 +804,15 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -859,112 +823,103 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm(
   {
     uint8_t ek[480U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-    KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 176U;
+    aes128_key_expansion(k, keys_b0);
+    aes128_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
     }
-    else if (iv_len == (uint32_t)0U)
+    else if (iv_len == 0U)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
     }
     else
     {
       uint8_t *ek0 = (*s).ek;
-      uint8_t *scratch_b = ek0 + (uint32_t)304U;
+      uint8_t *scratch_b = ek0 + 304U;
       uint8_t *ek1 = ek0;
       uint8_t *keys_b = ek1;
-      uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+      uint8_t *hkeys_b = ek1 + 176U;
       uint8_t tmp_iv[16U] = { 0U };
-      uint32_t len = iv_len / (uint32_t)16U;
-      uint32_t bytes_len = len * (uint32_t)16U;
+      uint32_t len = iv_len / 16U;
+      uint32_t bytes_len = len * 16U;
       uint8_t *iv_b = iv;
-      memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-      KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-          (uint64_t)iv_len,
-          (uint64_t)len,
-          tmp_iv,
-          tmp_iv,
-          hkeys_b));
+      memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+      compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
       uint8_t *inout_b = scratch_b;
-      uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-      uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+      uint8_t *abytes_b = scratch_b + 16U;
+      uint8_t *scratch_b1 = scratch_b + 32U;
+      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
       uint8_t *plain_b_ = plain;
       uint8_t *out_b_ = cipher;
       uint8_t *auth_b_ = ad;
-      memcpy(inout_b,
-        plain + plain_len_,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-      memcpy(abytes_b,
-        ad + auth_len_,
-        (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-      uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-      if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+      memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+      memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+      uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+      if (len128x6 / 16ULL >= 18ULL)
       {
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
         uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128x6_ = len128x6 / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        gcm128_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
       else
       {
-        uint32_t len128x61 = (uint32_t)0U;
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+        uint32_t len128x61 = 0U;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + len128x61;
         uint8_t *out128_b = out_b_ + len128x61;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        uint64_t len128x6_ = (uint64_t)0U;
-        KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        uint64_t len128x6_ = 0ULL;
+        gcm128_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
-      memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+      memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
         inout_b,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+        (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
       KRML_HOST_IGNORE(EverCrypt_Error_Success);
     }
     return EverCrypt_Error_Success;
@@ -988,15 +943,15 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -1007,112 +962,103 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm(
   {
     uint8_t ek[544U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-    KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 240U;
+    aes256_key_expansion(k, keys_b0);
+    aes256_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
     }
-    else if (iv_len == (uint32_t)0U)
+    else if (iv_len == 0U)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
     }
     else
     {
       uint8_t *ek0 = (*s).ek;
-      uint8_t *scratch_b = ek0 + (uint32_t)368U;
+      uint8_t *scratch_b = ek0 + 368U;
       uint8_t *ek1 = ek0;
       uint8_t *keys_b = ek1;
-      uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+      uint8_t *hkeys_b = ek1 + 240U;
       uint8_t tmp_iv[16U] = { 0U };
-      uint32_t len = iv_len / (uint32_t)16U;
-      uint32_t bytes_len = len * (uint32_t)16U;
+      uint32_t len = iv_len / 16U;
+      uint32_t bytes_len = len * 16U;
       uint8_t *iv_b = iv;
-      memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-      KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-          (uint64_t)iv_len,
-          (uint64_t)len,
-          tmp_iv,
-          tmp_iv,
-          hkeys_b));
+      memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+      compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
       uint8_t *inout_b = scratch_b;
-      uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-      uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+      uint8_t *abytes_b = scratch_b + 16U;
+      uint8_t *scratch_b1 = scratch_b + 32U;
+      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
       uint8_t *plain_b_ = plain;
       uint8_t *out_b_ = cipher;
       uint8_t *auth_b_ = ad;
-      memcpy(inout_b,
-        plain + plain_len_,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-      memcpy(abytes_b,
-        ad + auth_len_,
-        (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-      uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-      if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+      memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+      memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+      uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+      if (len128x6 / 16ULL >= 18ULL)
       {
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
         uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128x6_ = len128x6 / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        gcm256_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
       else
       {
-        uint32_t len128x61 = (uint32_t)0U;
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+        uint32_t len128x61 = 0U;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + len128x61;
         uint8_t *out128_b = out_b_ + len128x61;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        uint64_t len128x6_ = (uint64_t)0U;
-        KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        uint64_t len128x6_ = 0ULL;
+        gcm256_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
-      memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+      memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
         inout_b,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+        (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
       KRML_HOST_IGNORE(EverCrypt_Error_Success);
     }
     return EverCrypt_Error_Success;
@@ -1136,10 +1082,10 @@ EverCrypt_AEAD_encrypt_expand_chacha20_poly1305(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(iv_len);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
   uint8_t ek[32U] = { 0U };
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek };
-  memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(ek, k, 32U * sizeof (uint8_t));
   EverCrypt_AEAD_state_s *s = &p;
   uint8_t *ek0 = (*s).ek;
   EverCrypt_Chacha20Poly1305_aead_encrypt(ek0, iv, ad_len, ad, plain_len, plain, cipher, tag);
@@ -1222,66 +1168,57 @@ decrypt_aes128_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)304U;
+  uint8_t *scratch_b = ek + 304U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+  uint8_t *hkeys_b = ek1 + 176U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1305,15 +1242,15 @@ decrypt_aes128_gcm(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1335,11 +1272,11 @@ decrypt_aes128_gcm(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1366,66 +1303,57 @@ decrypt_aes256_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)368U;
+  uint8_t *scratch_b = ek + 368U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+  uint8_t *hkeys_b = ek1 + 240U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1449,15 +1377,15 @@ decrypt_aes256_gcm(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1479,11 +1407,11 @@ decrypt_aes256_gcm(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1514,14 +1442,14 @@ decrypt_chacha20_poly1305(
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len != (uint32_t)12U)
+  if (iv_len != 12U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
   uint32_t
   r = EverCrypt_Chacha20Poly1305_aead_decrypt(ek, iv, ad_len, ad, cipher_len, dst, cipher, tag);
-  if (r == (uint32_t)0U)
+  if (r == 0U)
   {
     return EverCrypt_Error_Success;
   }
@@ -1620,73 +1548,64 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[480U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-  KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 176U;
+  aes128_key_expansion(k, keys_b0);
+  aes128_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek0 = (*s).ek;
-  uint8_t *scratch_b = ek0 + (uint32_t)304U;
+  uint8_t *scratch_b = ek0 + 304U;
   uint8_t *ek1 = ek0;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+  uint8_t *hkeys_b = ek1 + 176U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1710,15 +1629,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1740,11 +1659,11 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1779,73 +1698,64 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[544U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-  KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 240U;
+  aes256_key_expansion(k, keys_b0);
+  aes256_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek0 = (*s).ek;
-  uint8_t *scratch_b = ek0 + (uint32_t)368U;
+  uint8_t *scratch_b = ek0 + 368U;
   uint8_t *ek1 = ek0;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+  uint8_t *hkeys_b = ek1 + 240U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1869,15 +1779,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1899,11 +1809,11 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1930,15 +1840,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -1949,61 +1859,52 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
   {
     uint8_t ek[480U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-    KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 176U;
+    aes128_key_expansion(k, keys_b0);
+    aes128_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       return EverCrypt_Error_InvalidKey;
     }
-    if (iv_len == (uint32_t)0U)
+    if (iv_len == 0U)
     {
       return EverCrypt_Error_InvalidIVLength;
     }
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)304U;
+    uint8_t *scratch_b = ek0 + 304U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+    uint8_t *hkeys_b = ek1 + 176U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *cipher_b_ = cipher;
     uint8_t *out_b_ = dst;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      cipher + cipher_len_,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+    memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
     uint64_t c;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+    if (len128x6 / 16ULL >= 6ULL)
     {
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
       uint64_t
       c0 =
         gcm128_decrypt_opt(auth_b_,
@@ -2027,15 +1928,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
       uint64_t
       c0 =
         gcm128_decrypt_opt(auth_b_,
@@ -2057,11 +1958,11 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
           tag);
       c = c0;
     }
-    memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
     uint64_t r = c;
-    if (r == (uint64_t)0U)
+    if (r == 0ULL)
     {
       return EverCrypt_Error_Success;
     }
@@ -2086,15 +1987,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -2105,61 +2006,52 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
   {
     uint8_t ek[544U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-    KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 240U;
+    aes256_key_expansion(k, keys_b0);
+    aes256_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       return EverCrypt_Error_InvalidKey;
     }
-    if (iv_len == (uint32_t)0U)
+    if (iv_len == 0U)
     {
       return EverCrypt_Error_InvalidIVLength;
     }
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)368U;
+    uint8_t *scratch_b = ek0 + 368U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+    uint8_t *hkeys_b = ek1 + 240U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *cipher_b_ = cipher;
     uint8_t *out_b_ = dst;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      cipher + cipher_len_,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+    memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
     uint64_t c;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+    if (len128x6 / 16ULL >= 6ULL)
     {
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
       uint64_t
       c0 =
         gcm256_decrypt_opt(auth_b_,
@@ -2183,15 +2075,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
       uint64_t
       c0 =
         gcm256_decrypt_opt(auth_b_,
@@ -2213,11 +2105,11 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
           tag);
       c = c0;
     }
-    memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
     uint64_t r = c;
-    if (r == (uint64_t)0U)
+    if (r == 0ULL)
     {
       return EverCrypt_Error_Success;
     }
@@ -2244,7 +2136,7 @@ EverCrypt_AEAD_decrypt_expand_chacha20_poly1305(
 {
   uint8_t ek[32U] = { 0U };
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek };
-  memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(ek, k, 32U * sizeof (uint8_t));
   EverCrypt_AEAD_state_s *s = &p;
   EverCrypt_Error_error_code
   r = decrypt_chacha20_poly1305(s, iv, iv_len, ad, ad_len, cipher, cipher_len, tag, dst);
diff --git a/src/msvc/EverCrypt_AutoConfig2.c b/src/msvc/EverCrypt_AutoConfig2.c
index b549d020..5a92d995 100644
--- a/src/msvc/EverCrypt_AutoConfig2.c
+++ b/src/msvc/EverCrypt_AutoConfig2.c
@@ -113,59 +113,59 @@ void EverCrypt_AutoConfig2_recall(void)
 void EverCrypt_AutoConfig2_init(void)
 {
   #if HACL_CAN_COMPILE_VALE
-  if (check_aesni() != (uint64_t)0U)
+  if (check_aesni() != 0ULL)
   {
     cpu_has_aesni[0U] = true;
     cpu_has_pclmulqdq[0U] = true;
   }
-  if (check_sha() != (uint64_t)0U)
+  if (check_sha() != 0ULL)
   {
     cpu_has_shaext[0U] = true;
   }
-  if (check_adx_bmi2() != (uint64_t)0U)
+  if (check_adx_bmi2() != 0ULL)
   {
     cpu_has_bmi2[0U] = true;
     cpu_has_adx[0U] = true;
   }
-  if (check_avx() != (uint64_t)0U)
+  if (check_avx() != 0ULL)
   {
-    if (check_osxsave() != (uint64_t)0U)
+    if (check_osxsave() != 0ULL)
     {
-      if (check_avx_xcr0() != (uint64_t)0U)
+      if (check_avx_xcr0() != 0ULL)
       {
         cpu_has_avx[0U] = true;
       }
     }
   }
-  if (check_avx2() != (uint64_t)0U)
+  if (check_avx2() != 0ULL)
   {
-    if (check_osxsave() != (uint64_t)0U)
+    if (check_osxsave() != 0ULL)
     {
-      if (check_avx_xcr0() != (uint64_t)0U)
+      if (check_avx_xcr0() != 0ULL)
       {
         cpu_has_avx2[0U] = true;
       }
     }
   }
-  if (check_sse() != (uint64_t)0U)
+  if (check_sse() != 0ULL)
   {
     cpu_has_sse[0U] = true;
   }
-  if (check_movbe() != (uint64_t)0U)
+  if (check_movbe() != 0ULL)
   {
     cpu_has_movbe[0U] = true;
   }
-  if (check_rdrand() != (uint64_t)0U)
+  if (check_rdrand() != 0ULL)
   {
     cpu_has_rdrand[0U] = true;
   }
-  if (check_avx512() != (uint64_t)0U)
+  if (check_avx512() != 0ULL)
   {
-    if (check_osxsave() != (uint64_t)0U)
+    if (check_osxsave() != 0ULL)
     {
-      if (check_avx_xcr0() != (uint64_t)0U)
+      if (check_avx_xcr0() != 0ULL)
       {
-        if (check_avx512_xcr0() != (uint64_t)0U)
+        if (check_avx512_xcr0() != 0ULL)
         {
           cpu_has_avx512[0U] = true;
           return;
diff --git a/src/msvc/EverCrypt_Chacha20Poly1305.c b/src/msvc/EverCrypt_Chacha20Poly1305.c
index 9a110bbf..0ff2d448 100644
--- a/src/msvc/EverCrypt_Chacha20Poly1305.c
+++ b/src/msvc/EverCrypt_Chacha20Poly1305.c
@@ -44,7 +44,7 @@ EverCrypt_Chacha20Poly1305_aead_encrypt(
   #if HACL_CAN_COMPILE_VEC256
   if (vec256)
   {
-    KRML_HOST_IGNORE(vec128);
+    KRML_MAYBE_UNUSED_VAR(vec128);
     Hacl_Chacha20Poly1305_256_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
     return;
   }
@@ -52,13 +52,13 @@ EverCrypt_Chacha20Poly1305_aead_encrypt(
   #if HACL_CAN_COMPILE_VEC128
   if (vec128)
   {
-    KRML_HOST_IGNORE(vec256);
+    KRML_MAYBE_UNUSED_VAR(vec256);
     Hacl_Chacha20Poly1305_128_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
     return;
   }
   #endif
-  KRML_HOST_IGNORE(vec128);
-  KRML_HOST_IGNORE(vec256);
+  KRML_MAYBE_UNUSED_VAR(vec128);
+  KRML_MAYBE_UNUSED_VAR(vec256);
   Hacl_Chacha20Poly1305_32_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
 }
 
@@ -79,19 +79,19 @@ EverCrypt_Chacha20Poly1305_aead_decrypt(
   #if HACL_CAN_COMPILE_VEC256
   if (vec256)
   {
-    KRML_HOST_IGNORE(vec128);
+    KRML_MAYBE_UNUSED_VAR(vec128);
     return Hacl_Chacha20Poly1305_256_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
   }
   #endif
   #if HACL_CAN_COMPILE_VEC128
   if (vec128)
   {
-    KRML_HOST_IGNORE(vec256);
+    KRML_MAYBE_UNUSED_VAR(vec256);
     return Hacl_Chacha20Poly1305_128_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
   }
   #endif
-  KRML_HOST_IGNORE(vec128);
-  KRML_HOST_IGNORE(vec256);
+  KRML_MAYBE_UNUSED_VAR(vec128);
+  KRML_MAYBE_UNUSED_VAR(vec256);
   return Hacl_Chacha20Poly1305_32_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
 }
 
diff --git a/src/msvc/EverCrypt_DRBG.c b/src/msvc/EverCrypt_DRBG.c
index 9591823c..1395f59f 100644
--- a/src/msvc/EverCrypt_DRBG.c
+++ b/src/msvc/EverCrypt_DRBG.c
@@ -28,15 +28,15 @@
 #include "internal/EverCrypt_HMAC.h"
 #include "lib_memzero0.h"
 
-uint32_t EverCrypt_DRBG_reseed_interval = (uint32_t)1024U;
+uint32_t EverCrypt_DRBG_reseed_interval = 1024U;
 
-uint32_t EverCrypt_DRBG_max_output_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_output_length = 65536U;
 
-uint32_t EverCrypt_DRBG_max_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_length = 65536U;
 
-uint32_t EverCrypt_DRBG_max_personalization_string_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_personalization_string_length = 65536U;
 
-uint32_t EverCrypt_DRBG_max_additional_input_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_additional_input_length = 65536U;
 
 uint32_t EverCrypt_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
 {
@@ -44,19 +44,19 @@ uint32_t EverCrypt_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     default:
       {
@@ -92,7 +92,7 @@ EverCrypt_DRBG_uu___is_SHA1_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA1_s)
   {
     return true;
@@ -106,7 +106,7 @@ EverCrypt_DRBG_uu___is_SHA2_256_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA2_256_s)
   {
     return true;
@@ -120,7 +120,7 @@ EverCrypt_DRBG_uu___is_SHA2_384_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA2_384_s)
   {
     return true;
@@ -134,7 +134,7 @@ EverCrypt_DRBG_uu___is_SHA2_512_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA2_512_s)
   {
     return true;
@@ -149,10 +149,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -164,10 +164,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -179,10 +179,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -194,10 +194,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -247,7 +247,7 @@ instantiate_sha1(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t *entropy = (uint8_t *)alloca(min_entropy * sizeof (uint8_t));
@@ -285,45 +285,43 @@ instantiate_sha1(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)20U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)20U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 20U * sizeof (uint8_t));
+  memset(v, 1U, 20U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 21U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 20U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)21U,
+    memcpy(input0 + 21U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[20U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-  EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-  memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[20U] = 0U;
+  EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+  EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+  memcpy(k, k_, 20U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 21U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 20U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)21U,
+      memcpy(input + 21U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[20U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-    EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+    input[20U] = 1U;
+    EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+    EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+    memcpy(k, k_0, 20U * sizeof (uint8_t));
   }
   return true;
 }
@@ -340,7 +338,7 @@ instantiate_sha2_256(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t *entropy = (uint8_t *)alloca(min_entropy * sizeof (uint8_t));
@@ -378,45 +376,43 @@ instantiate_sha2_256(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)32U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)32U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 32U * sizeof (uint8_t));
+  memset(v, 1U, 32U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 33U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 32U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)33U,
+    memcpy(input0 + 33U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[32U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-  memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[32U] = 0U;
+  EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+  memcpy(k, k_, 32U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 33U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 32U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)33U,
+      memcpy(input + 33U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[32U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+    input[32U] = 1U;
+    EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+    memcpy(k, k_0, 32U * sizeof (uint8_t));
   }
   return true;
 }
@@ -433,7 +429,7 @@ instantiate_sha2_384(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t *entropy = (uint8_t *)alloca(min_entropy * sizeof (uint8_t));
@@ -471,45 +467,43 @@ instantiate_sha2_384(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)48U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)48U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 48U * sizeof (uint8_t));
+  memset(v, 1U, 48U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 49U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 48U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)49U,
+    memcpy(input0 + 49U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[48U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-  memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[48U] = 0U;
+  EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+  memcpy(k, k_, 48U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 49U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 48U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)49U,
+      memcpy(input + 49U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[48U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+    input[48U] = 1U;
+    EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+    memcpy(k, k_0, 48U * sizeof (uint8_t));
   }
   return true;
 }
@@ -526,7 +520,7 @@ instantiate_sha2_512(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t *entropy = (uint8_t *)alloca(min_entropy * sizeof (uint8_t));
@@ -564,45 +558,43 @@ instantiate_sha2_512(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)64U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)64U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 64U * sizeof (uint8_t));
+  memset(v, 1U, 64U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 65U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 64U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)65U,
+    memcpy(input0 + 65U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[64U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-  memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[64U] = 0U;
+  EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+  memcpy(k, k_, 64U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 65U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 64U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)65U,
+      memcpy(input + 65U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[64U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+    input[64U] = 1U;
+    EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+    memcpy(k, k_0, 64U * sizeof (uint8_t));
   }
   return true;
 }
@@ -649,42 +641,42 @@ reseed_sha1(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 21U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 20U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)21U,
+    memcpy(input0 + 21U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[20U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-  EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-  memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[20U] = 0U;
+  EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+  EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+  memcpy(k, k_, 20U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)21U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 21U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 20U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)21U,
+      memcpy(input + 21U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[20U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-    EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+    input[20U] = 1U;
+    EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+    EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+    memcpy(k, k_0, 20U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -730,42 +722,42 @@ reseed_sha2_256(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 33U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 32U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)33U,
+    memcpy(input0 + 33U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[32U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-  memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[32U] = 0U;
+  EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+  memcpy(k, k_, 32U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)33U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 33U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 32U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)33U,
+      memcpy(input + 33U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[32U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+    input[32U] = 1U;
+    EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+    memcpy(k, k_0, 32U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -811,42 +803,42 @@ reseed_sha2_384(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 49U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 48U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)49U,
+    memcpy(input0 + 49U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[48U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-  memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[48U] = 0U;
+  EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+  memcpy(k, k_, 48U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)49U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 49U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 48U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)49U,
+      memcpy(input + 49U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[48U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+    input[48U] = 1U;
+    EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+    memcpy(k, k_0, 48U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -892,42 +884,42 @@ reseed_sha2_512(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 65U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 64U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)65U,
+    memcpy(input0 + 65U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[64U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-  memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[64U] = 0U;
+  EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+  memcpy(k, k_, 64U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)65U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 65U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 64U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)65U,
+      memcpy(input + 65U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[64U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+    input[64U] = 1U;
+    EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+    memcpy(k, k_0, 64U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -992,42 +984,42 @@ generate_sha1(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 21U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 20U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)21U,
+        memcpy(input0 + 21U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[20U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-      EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-      memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[20U] = 0U;
+      EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+      EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+      memcpy(k, k_, 20U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)21U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 21U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 20U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)21U,
+          memcpy(input + 21U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[20U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-        EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+        input[20U] = 1U;
+        EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+        EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+        memcpy(k, k_0, 20U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1037,16 +1029,16 @@ generate_sha1(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA1_s)
   {
-    x1 = st_s.case_SHA1_s;
+    ite = st_s.case_SHA1_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1062,87 +1054,87 @@ generate_sha1(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)21U + additional_input_len;
+    uint32_t input_len = 21U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 20U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[20U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-    EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[20U] = 0U;
+    EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+    EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+    memcpy(k, k_, 20U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+      uint32_t input_len0 = 21U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 20U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[20U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-      EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-      memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+      input[20U] = 1U;
+      EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+      EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+      memcpy(k, k_0, 20U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)20U;
+  uint32_t max = n / 20U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(out + i * (uint32_t)20U, v, (uint32_t)20U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha1(v, k, 20U, v, 20U);
+    memcpy(out + i * 20U, v, 20U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)20U < n)
+  if (max * 20U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)20U;
-    EverCrypt_HMAC_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(block, v, (n - max * (uint32_t)20U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 20U;
+    EverCrypt_HMAC_compute_sha1(v, k, 20U, v, 20U);
+    memcpy(block, v, (n - max * 20U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)21U + additional_input_len;
+  uint32_t input_len = 21U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 20U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[20U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-  EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-  memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[20U] = 0U;
+  EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+  EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+  memcpy(k, k_, 20U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+    uint32_t input_len0 = 21U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 20U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[20U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-    EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+    input[20U] = 1U;
+    EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+    EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+    memcpy(k, k_0, 20U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1207,42 +1199,42 @@ generate_sha2_256(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 33U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 32U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)33U,
+        memcpy(input0 + 33U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[32U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-      EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-      memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[32U] = 0U;
+      EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+      EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+      memcpy(k, k_, 32U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)33U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 33U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 32U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)33U,
+          memcpy(input + 33U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[32U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-        EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+        input[32U] = 1U;
+        EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+        EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+        memcpy(k, k_0, 32U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1252,16 +1244,16 @@ generate_sha2_256(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA2_256_s)
   {
-    x1 = st_s.case_SHA2_256_s;
+    ite = st_s.case_SHA2_256_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1277,87 +1269,87 @@ generate_sha2_256(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)33U + additional_input_len;
+    uint32_t input_len = 33U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 32U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[32U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-    EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[32U] = 0U;
+    EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+    EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+    memcpy(k, k_, 32U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+      uint32_t input_len0 = 33U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 32U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[32U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-      EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-      memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+      input[32U] = 1U;
+      EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+      EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+      memcpy(k, k_0, 32U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)32U;
+  uint32_t max = n / 32U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(out + i * (uint32_t)32U, v, (uint32_t)32U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+    memcpy(out + i * 32U, v, 32U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)32U < n)
+  if (max * 32U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)32U;
-    EverCrypt_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(block, v, (n - max * (uint32_t)32U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 32U;
+    EverCrypt_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+    memcpy(block, v, (n - max * 32U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)33U + additional_input_len;
+  uint32_t input_len = 33U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 32U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[32U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-  memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[32U] = 0U;
+  EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+  memcpy(k, k_, 32U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+    uint32_t input_len0 = 33U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 32U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[32U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+    input[32U] = 1U;
+    EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+    memcpy(k, k_0, 32U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1422,42 +1414,42 @@ generate_sha2_384(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 49U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 48U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)49U,
+        memcpy(input0 + 49U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[48U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-      EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-      memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[48U] = 0U;
+      EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+      EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+      memcpy(k, k_, 48U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)49U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 49U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 48U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)49U,
+          memcpy(input + 49U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[48U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-        EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+        input[48U] = 1U;
+        EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+        EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+        memcpy(k, k_0, 48U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1467,16 +1459,16 @@ generate_sha2_384(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA2_384_s)
   {
-    x1 = st_s.case_SHA2_384_s;
+    ite = st_s.case_SHA2_384_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1492,87 +1484,87 @@ generate_sha2_384(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)49U + additional_input_len;
+    uint32_t input_len = 49U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 48U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[48U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-    EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[48U] = 0U;
+    EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+    EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+    memcpy(k, k_, 48U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+      uint32_t input_len0 = 49U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 48U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[48U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-      EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-      memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+      input[48U] = 1U;
+      EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+      EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+      memcpy(k, k_0, 48U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)48U;
+  uint32_t max = n / 48U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(out + i * (uint32_t)48U, v, (uint32_t)48U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+    memcpy(out + i * 48U, v, 48U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)48U < n)
+  if (max * 48U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)48U;
-    EverCrypt_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(block, v, (n - max * (uint32_t)48U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 48U;
+    EverCrypt_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+    memcpy(block, v, (n - max * 48U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)49U + additional_input_len;
+  uint32_t input_len = 49U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 48U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[48U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-  memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[48U] = 0U;
+  EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+  memcpy(k, k_, 48U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+    uint32_t input_len0 = 49U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 48U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[48U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+    input[48U] = 1U;
+    EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+    memcpy(k, k_0, 48U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1637,42 +1629,42 @@ generate_sha2_512(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 65U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 64U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)65U,
+        memcpy(input0 + 65U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[64U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-      EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-      memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[64U] = 0U;
+      EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+      EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+      memcpy(k, k_, 64U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)65U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 65U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 64U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)65U,
+          memcpy(input + 65U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[64U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-        EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+        input[64U] = 1U;
+        EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+        EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+        memcpy(k, k_0, 64U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1682,16 +1674,16 @@ generate_sha2_512(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA2_512_s)
   {
-    x1 = st_s.case_SHA2_512_s;
+    ite = st_s.case_SHA2_512_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1707,87 +1699,87 @@ generate_sha2_512(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)65U + additional_input_len;
+    uint32_t input_len = 65U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 64U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[64U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-    EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[64U] = 0U;
+    EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+    EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+    memcpy(k, k_, 64U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+      uint32_t input_len0 = 65U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 64U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[64U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-      EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-      memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+      input[64U] = 1U;
+      EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+      EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+      memcpy(k, k_0, 64U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)64U;
+  uint32_t max = n / 64U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(out + i * (uint32_t)64U, v, (uint32_t)64U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+    memcpy(out + i * 64U, v, 64U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)64U < n)
+  if (max * 64U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)64U;
-    EverCrypt_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(block, v, (n - max * (uint32_t)64U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 64U;
+    EverCrypt_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+    memcpy(block, v, (n - max * 64U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)65U + additional_input_len;
+  uint32_t input_len = 65U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 64U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[64U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-  memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[64U] = 0U;
+  EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+  memcpy(k, k_, 64U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+    uint32_t input_len0 = 65U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 64U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[64U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+    input[64U] = 1U;
+    EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+    memcpy(k, k_0, 64U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1806,9 +1798,9 @@ static void uninstantiate_sha1(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)20U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)20U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 20U, uint8_t);
+  Lib_Memzero0_memzero(v, 20U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
@@ -1830,9 +1822,9 @@ static void uninstantiate_sha2_256(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)32U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 32U, uint8_t);
+  Lib_Memzero0_memzero(v, 32U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
@@ -1854,9 +1846,9 @@ static void uninstantiate_sha2_384(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)48U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)48U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 48U, uint8_t);
+  Lib_Memzero0_memzero(v, 48U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
@@ -1878,9 +1870,9 @@ static void uninstantiate_sha2_512(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)64U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)64U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 64U, uint8_t);
+  Lib_Memzero0_memzero(v, 64U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
diff --git a/src/msvc/EverCrypt_HKDF.c b/src/msvc/EverCrypt_HKDF.c
index a802095d..cbccb94f 100644
--- a/src/msvc/EverCrypt_HKDF.c
+++ b/src/msvc/EverCrypt_HKDF.c
@@ -37,39 +37,39 @@ expand_sha1(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)20U;
+  uint32_t tlen = 20U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -92,39 +92,39 @@ expand_sha2_256(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -147,39 +147,39 @@ expand_sha2_384(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)48U;
+  uint32_t tlen = 48U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -202,39 +202,39 @@ expand_sha2_512(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -257,39 +257,39 @@ expand_blake2s(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -312,39 +312,39 @@ expand_blake2b(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/msvc/EverCrypt_HMAC.c b/src/msvc/EverCrypt_HMAC.c
index f279dfb8..2fa5d157 100644
--- a/src/msvc/EverCrypt_HMAC.c
+++ b/src/msvc/EverCrypt_HMAC.c
@@ -79,23 +79,23 @@ EverCrypt_HMAC_compute_sha1(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)20U;
+    ite = 20U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -105,42 +105,37 @@ EverCrypt_HMAC_compute_sha1(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
-  uint32_t
-  s[5U] =
-    {
-      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-      (uint32_t)0xc3d2e1f0U
-    };
+  uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)0U, ipad, (uint32_t)64U);
+    Hacl_Hash_SHA1_legacy_update_last(s, 0ULL, ipad, 64U);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -152,25 +147,21 @@ EverCrypt_HMAC_compute_sha1(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_Hash_SHA1_legacy_update_multi(s, ipad, (uint32_t)1U);
+    Hacl_Hash_SHA1_legacy_update_multi(s, ipad, 1U);
     Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-    Hacl_Hash_SHA1_legacy_update_last(s,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-      rem,
-      rem_len);
+    Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
   }
   Hacl_Hash_Core_SHA1_legacy_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_Hash_Core_SHA1_legacy_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)20U / block_len;
-  uint32_t rem0 = (uint32_t)20U % block_len;
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 20U / block_len;
+  uint32_t rem0 = 20U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)20U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 20U - n_blocks_ * block_len });
   }
   else
   {
@@ -181,12 +172,9 @@ EverCrypt_HMAC_compute_sha1(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_Hash_SHA1_legacy_update_multi(s, opad, (uint32_t)1U);
+  Hacl_Hash_SHA1_legacy_update_multi(s, opad, 1U);
   Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-  Hacl_Hash_SHA1_legacy_update_last(s,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-    rem,
-    rem_len);
+  Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
   Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
 }
 
@@ -199,23 +187,23 @@ EverCrypt_HMAC_compute_sha2_256(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -225,48 +213,45 @@ EverCrypt_HMAC_compute_sha2_256(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
     uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
     os[i] = x;);
   uint32_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)0U + (uint64_t)(uint32_t)64U,
-      (uint32_t)64U,
-      ipad,
-      s);
+    Hacl_SHA2_Scalar32_sha256_update_last(0ULL + (uint64_t)64U, 64U, ipad, s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -278,9 +263,9 @@ EverCrypt_HMAC_compute_sha2_256(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    EverCrypt_Hash_update_multi_256(s, ipad, (uint32_t)1U);
+    EverCrypt_Hash_update_multi_256(s, ipad, 1U);
     EverCrypt_Hash_update_multi_256(s, full_blocks, n_blocks);
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
+    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)64U
       + (uint64_t)full_blocks_len
       + (uint64_t)rem_len,
       rem_len,
@@ -290,15 +275,14 @@ EverCrypt_HMAC_compute_sha2_256(
   Hacl_SHA2_Scalar32_sha256_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_SHA2_Scalar32_sha256_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -309,9 +293,9 @@ EverCrypt_HMAC_compute_sha2_256(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  EverCrypt_Hash_update_multi_256(s, opad, (uint32_t)1U);
+  EverCrypt_Hash_update_multi_256(s, opad, 1U);
   EverCrypt_Hash_update_multi_256(s, full_blocks, n_blocks);
-  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
+  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)64U
     + (uint64_t)full_blocks_len
     + (uint64_t)rem_len,
     rem_len,
@@ -329,23 +313,23 @@ EverCrypt_HMAC_compute_sha2_384(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)48U;
+    ite = 48U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -355,49 +339,49 @@ EverCrypt_HMAC_compute_sha2_384(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
     uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -409,9 +393,9 @@ EverCrypt_HMAC_compute_sha2_384(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_SHA2_Scalar32_sha384_update_nblocks(128U, ipad, s);
+    Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
@@ -421,15 +405,14 @@ EverCrypt_HMAC_compute_sha2_384(
   Hacl_SHA2_Scalar32_sha384_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_SHA2_Scalar32_sha384_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)48U / block_len;
-  uint32_t rem0 = (uint32_t)48U % block_len;
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 48U / block_len;
+  uint32_t rem0 = 48U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)48U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 48U - n_blocks_ * block_len });
   }
   else
   {
@@ -440,9 +423,9 @@ EverCrypt_HMAC_compute_sha2_384(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_SHA2_Scalar32_sha384_update_nblocks(128U, opad, s);
+  Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
@@ -460,23 +443,23 @@ EverCrypt_HMAC_compute_sha2_512(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -486,49 +469,49 @@ EverCrypt_HMAC_compute_sha2_512(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
     uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -540,9 +523,9 @@ EverCrypt_HMAC_compute_sha2_512(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(128U, ipad, s);
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
@@ -552,15 +535,14 @@ EverCrypt_HMAC_compute_sha2_512(
   Hacl_SHA2_Scalar32_sha512_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_SHA2_Scalar32_sha512_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -571,9 +553,9 @@ EverCrypt_HMAC_compute_sha2_512(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_SHA2_Scalar32_sha512_update_nblocks(128U, opad, s);
+  Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
@@ -591,66 +573,66 @@ EverCrypt_HMAC_compute_blake2s(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2s_32_blake2s((uint32_t)32U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Blake2s_32_blake2s(32U, nkey, key_len, key, 0U, NULL);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t s[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_init(s, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_32_blake2s_init(s, 0U, 32U);
   uint32_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last((uint32_t)64U, wv, s0, (uint64_t)0U, (uint32_t)64U, ipad);
+    Hacl_Blake2s_32_blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -663,9 +645,9 @@ EverCrypt_HMAC_compute_blake2s(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, ipad, (uint32_t)1U);
+    Hacl_Blake2s_32_blake2s_update_multi(64U, wv, s0, 0ULL, ipad, 1U);
     uint32_t wv0[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+    Hacl_Blake2s_32_blake2s_update_multi(n_blocks * 64U,
       wv0,
       s0,
       (uint64_t)block_len,
@@ -675,22 +657,21 @@ EverCrypt_HMAC_compute_blake2s(
     Hacl_Blake2s_32_blake2s_update_last(rem_len,
       wv1,
       s0,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+      (uint64_t)64U + (uint64_t)full_blocks_len,
       rem_len,
       rem);
   }
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst1, s0);
+  Hacl_Blake2s_32_blake2s_finish(32U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2s_32_blake2s_init(s0, (uint32_t)0U, (uint32_t)32U);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Blake2s_32_blake2s_init(s0, 0U, 32U);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -702,9 +683,9 @@ EverCrypt_HMAC_compute_blake2s(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint32_t wv[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, opad, (uint32_t)1U);
+  Hacl_Blake2s_32_blake2s_update_multi(64U, wv, s0, 0ULL, opad, 1U);
   uint32_t wv0[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+  Hacl_Blake2s_32_blake2s_update_multi(n_blocks * 64U,
     wv0,
     s0,
     (uint64_t)block_len,
@@ -714,10 +695,10 @@ EverCrypt_HMAC_compute_blake2s(
   Hacl_Blake2s_32_blake2s_update_last(rem_len,
     wv1,
     s0,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+    (uint64_t)64U + (uint64_t)full_blocks_len,
     rem_len,
     rem);
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, s0);
+  Hacl_Blake2s_32_blake2s_finish(32U, dst, s0);
 }
 
 void
@@ -729,71 +710,71 @@ EverCrypt_HMAC_compute_blake2b(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2b_32_blake2b((uint32_t)64U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Blake2b_32_blake2b(64U, nkey, key_len, key, 0U, NULL);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t s[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_init(s, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_32_blake2b_init(s, 0U, 64U);
   uint64_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last((uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_last(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      (uint32_t)128U,
+      FStar_UInt128_uint64_to_uint128(0ULL),
+      128U,
       ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -806,14 +787,14 @@ EverCrypt_HMAC_compute_blake2b(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_multi(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+      FStar_UInt128_uint64_to_uint128(0ULL),
       ipad,
-      (uint32_t)1U);
+      1U);
     uint64_t wv0[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_multi(n_blocks * 128U,
       wv0,
       s0,
       FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
@@ -823,23 +804,22 @@ EverCrypt_HMAC_compute_blake2b(
     Hacl_Blake2b_32_blake2b_update_last(rem_len,
       wv1,
       s0,
-      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       rem_len,
       rem);
   }
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst1, s0);
+  Hacl_Blake2b_32_blake2b_finish(64U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2b_32_blake2b_init(s0, (uint32_t)0U, (uint32_t)64U);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Blake2b_32_blake2b_init(s0, 0U, 64U);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -851,14 +831,14 @@ EverCrypt_HMAC_compute_blake2b(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint64_t wv[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
+  Hacl_Blake2b_32_blake2b_update_multi(128U,
     wv,
     s0,
-    FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+    FStar_UInt128_uint64_to_uint128(0ULL),
     opad,
-    (uint32_t)1U);
+    1U);
   uint64_t wv0[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+  Hacl_Blake2b_32_blake2b_update_multi(n_blocks * 128U,
     wv0,
     s0,
     FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
@@ -868,11 +848,11 @@ EverCrypt_HMAC_compute_blake2b(
   Hacl_Blake2b_32_blake2b_update_last(rem_len,
     wv1,
     s0,
-    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
       FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
     rem_len,
     rem);
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, s0);
+  Hacl_Blake2b_32_blake2b_finish(64U, dst, s0);
 }
 
 void
diff --git a/src/msvc/EverCrypt_Hash.c b/src/msvc/EverCrypt_Hash.c
index b88df9e2..ea3a1dea 100644
--- a/src/msvc/EverCrypt_Hash.c
+++ b/src/msvc/EverCrypt_Hash.c
@@ -146,61 +146,61 @@ static EverCrypt_Hash_state_s *create_in(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = MD5_s, { .case_MD5_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA1_s, { .case_SHA1_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_224_s, { .case_SHA2_224_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_256_s, { .case_SHA2_256_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_384_s, { .case_SHA2_384_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_512_s, { .case_SHA2_512_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_224_s, { .case_SHA3_224_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_256_s, { .case_SHA3_256_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_384_s, { .case_SHA3_384_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_512_s, { .case_SHA3_512_s = buf } });
         break;
       }
@@ -220,11 +220,11 @@ static EverCrypt_Hash_state_s *create_in(Spec_Hash_Definitions_hash_alg a)
         }
         else
         {
-          uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
+          uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
           s = ((EverCrypt_Hash_state_s){ .tag = Blake2S_s, { .case_Blake2S_s = buf } });
         }
         #else
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = Blake2S_s, { .case_Blake2S_s = buf } });
         #endif
         break;
@@ -245,11 +245,11 @@ static EverCrypt_Hash_state_s *create_in(Spec_Hash_Definitions_hash_alg a)
         }
         else
         {
-          uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
+          uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
           s = ((EverCrypt_Hash_state_s){ .tag = Blake2B_s, { .case_Blake2B_s = buf } });
         }
         #else
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = Blake2B_s, { .case_Blake2B_s = buf } });
         #endif
         break;
@@ -308,58 +308,58 @@ static void init(EverCrypt_Hash_state_s *s)
   if (scrut.tag == SHA3_224_s)
   {
     uint64_t *p1 = scrut.case_SHA3_224_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == SHA3_256_s)
   {
     uint64_t *p1 = scrut.case_SHA3_256_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == SHA3_384_s)
   {
     uint64_t *p1 = scrut.case_SHA3_384_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == SHA3_512_s)
   {
     uint64_t *p1 = scrut.case_SHA3_512_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == Blake2S_s)
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
-    Hacl_Blake2s_32_blake2s_init(p1, (uint32_t)0U, (uint32_t)32U);
+    Hacl_Blake2s_32_blake2s_init(p1, 0U, 32U);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
   {
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
-    Hacl_Blake2s_128_blake2s_init(p1, (uint32_t)0U, (uint32_t)32U);
+    Hacl_Blake2s_128_blake2s_init(p1, 0U, 32U);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
   if (scrut.tag == Blake2B_s)
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
-    Hacl_Blake2b_32_blake2b_init(p1, (uint32_t)0U, (uint32_t)64U);
+    Hacl_Blake2b_32_blake2b_init(p1, 0U, 64U);
     return;
   }
   if (scrut.tag == Blake2B_256_s)
   {
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
-    Hacl_Blake2b_256_blake2b_init(p1, (uint32_t)0U, (uint32_t)64U);
+    Hacl_Blake2b_256_blake2b_init(p1, 0U, 64U);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -373,22 +373,16 @@ static void init(EverCrypt_Hash_state_s *s)
 static uint32_t
 k224_256[64U] =
   {
-    (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U,
-    (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U,
-    (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U,
-    (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U,
-    (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU,
-    (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU,
-    (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U,
-    (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U,
-    (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U,
-    (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U,
-    (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U,
-    (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U,
-    (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U,
-    (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U,
-    (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U,
-    (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U
+    0x428a2f98U, 0x71374491U, 0xb5c0fbcfU, 0xe9b5dba5U, 0x3956c25bU, 0x59f111f1U, 0x923f82a4U,
+    0xab1c5ed5U, 0xd807aa98U, 0x12835b01U, 0x243185beU, 0x550c7dc3U, 0x72be5d74U, 0x80deb1feU,
+    0x9bdc06a7U, 0xc19bf174U, 0xe49b69c1U, 0xefbe4786U, 0x0fc19dc6U, 0x240ca1ccU, 0x2de92c6fU,
+    0x4a7484aaU, 0x5cb0a9dcU, 0x76f988daU, 0x983e5152U, 0xa831c66dU, 0xb00327c8U, 0xbf597fc7U,
+    0xc6e00bf3U, 0xd5a79147U, 0x06ca6351U, 0x14292967U, 0x27b70a85U, 0x2e1b2138U, 0x4d2c6dfcU,
+    0x53380d13U, 0x650a7354U, 0x766a0abbU, 0x81c2c92eU, 0x92722c85U, 0xa2bfe8a1U, 0xa81a664bU,
+    0xc24b8b70U, 0xc76c51a3U, 0xd192e819U, 0xd6990624U, 0xf40e3585U, 0x106aa070U, 0x19a4c116U,
+    0x1e376c08U, 0x2748774cU, 0x34b0bcb5U, 0x391c0cb3U, 0x4ed8aa4aU, 0x5b9cca4fU, 0x682e6ff3U,
+    0x748f82eeU, 0x78a5636fU, 0x84c87814U, 0x8cc70208U, 0x90befffaU, 0xa4506cebU, 0xbef9a3f7U,
+    0xc67178f2U
   };
 
 void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n)
@@ -399,13 +393,13 @@ void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n)
   if (has_shaext && has_sse)
   {
     uint64_t n1 = (uint64_t)n;
-    KRML_HOST_IGNORE(sha256_update(s, blocks, n1, k224_256));
+    sha256_update(s, blocks, n1, k224_256);
     return;
   }
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(n * (uint32_t)64U, blocks, s);
+  Hacl_SHA2_Scalar32_sha256_update_nblocks(n * 64U, blocks, s);
   #else
   KRML_HOST_IGNORE(k224_256);
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(n * (uint32_t)64U, blocks, s);
+  Hacl_SHA2_Scalar32_sha256_update_nblocks(n * 64U, blocks, s);
   #endif
 }
 
@@ -416,100 +410,100 @@ update_multi(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *blocks, uint3
   if (scrut.tag == MD5_s)
   {
     uint32_t *p1 = scrut.case_MD5_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     Hacl_Hash_MD5_legacy_update_multi(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA1_s)
   {
     uint32_t *p1 = scrut.case_SHA1_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     Hacl_Hash_SHA1_legacy_update_multi(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA2_224_s)
   {
     uint32_t *p1 = scrut.case_SHA2_224_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     EverCrypt_Hash_update_multi_256(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA2_256_s)
   {
     uint32_t *p1 = scrut.case_SHA2_256_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     EverCrypt_Hash_update_multi_256(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA2_384_s)
   {
     uint64_t *p1 = scrut.case_SHA2_384_s;
-    uint32_t n = len / (uint32_t)128U;
-    Hacl_SHA2_Scalar32_sha384_update_nblocks(n * (uint32_t)128U, blocks, p1);
+    uint32_t n = len / 128U;
+    Hacl_SHA2_Scalar32_sha384_update_nblocks(n * 128U, blocks, p1);
     return;
   }
   if (scrut.tag == SHA2_512_s)
   {
     uint64_t *p1 = scrut.case_SHA2_512_s;
-    uint32_t n = len / (uint32_t)128U;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(n * (uint32_t)128U, blocks, p1);
+    uint32_t n = len / 128U;
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(n * 128U, blocks, p1);
     return;
   }
   if (scrut.tag == SHA3_224_s)
   {
     uint64_t *p1 = scrut.case_SHA3_224_s;
-    uint32_t n = len / (uint32_t)144U;
+    uint32_t n = len / 144U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_224, p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA3_256_s)
   {
     uint64_t *p1 = scrut.case_SHA3_256_s;
-    uint32_t n = len / (uint32_t)136U;
+    uint32_t n = len / 136U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_256, p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA3_384_s)
   {
     uint64_t *p1 = scrut.case_SHA3_384_s;
-    uint32_t n = len / (uint32_t)104U;
+    uint32_t n = len / 104U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_384, p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA3_512_s)
   {
     uint64_t *p1 = scrut.case_SHA3_512_s;
-    uint32_t n = len / (uint32_t)72U;
+    uint32_t n = len / 72U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_512, p1, blocks, n);
     return;
   }
   if (scrut.tag == Blake2S_s)
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi(n * (uint32_t)64U, wv, p1, prevlen, blocks, n);
+    Hacl_Blake2s_32_blake2s_update_multi(n * 64U, wv, p1, prevlen, blocks, n);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
   {
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_multi(n * (uint32_t)64U, wv, p1, prevlen, blocks, n);
+    Hacl_Blake2s_128_blake2s_update_multi(n * 64U, wv, p1, prevlen, blocks, n);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
   if (scrut.tag == Blake2B_s)
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
-    uint32_t n = len / (uint32_t)128U;
+    uint32_t n = len / 128U;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi(n * (uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_multi(n * 128U,
       wv,
       p1,
       FStar_UInt128_uint64_to_uint128(prevlen),
@@ -521,9 +515,9 @@ update_multi(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *blocks, uint3
   {
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
-    uint32_t n = len / (uint32_t)128U;
+    uint32_t n = len / 128U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_multi(n * (uint32_t)128U,
+    Hacl_Blake2b_256_blake2b_update_multi(n * 128U,
       wv,
       p1,
       FStar_UInt128_uint64_to_uint128(prevlen),
@@ -531,7 +525,7 @@ update_multi(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *blocks, uint3
       n);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -629,7 +623,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
     Hacl_Blake2s_128_blake2s_update_last(last_len, wv, p1, prev_len, last_len, last);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -658,7 +652,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
       last);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -711,58 +705,58 @@ static void finish(EverCrypt_Hash_state_s *s, uint8_t *dst)
   if (scrut.tag == SHA3_224_s)
   {
     uint64_t *p1 = scrut.case_SHA3_224_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)144U, (uint32_t)28U, dst);
+    Hacl_Impl_SHA3_squeeze(p1, 144U, 28U, dst);
     return;
   }
   if (scrut.tag == SHA3_256_s)
   {
     uint64_t *p1 = scrut.case_SHA3_256_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)136U, (uint32_t)32U, dst);
+    Hacl_Impl_SHA3_squeeze(p1, 136U, 32U, dst);
     return;
   }
   if (scrut.tag == SHA3_384_s)
   {
     uint64_t *p1 = scrut.case_SHA3_384_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)104U, (uint32_t)48U, dst);
+    Hacl_Impl_SHA3_squeeze(p1, 104U, 48U, dst);
     return;
   }
   if (scrut.tag == SHA3_512_s)
   {
     uint64_t *p1 = scrut.case_SHA3_512_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)72U, (uint32_t)64U, dst);
+    Hacl_Impl_SHA3_squeeze(p1, 72U, 64U, dst);
     return;
   }
   if (scrut.tag == Blake2S_s)
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
-    Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, p1);
+    Hacl_Blake2s_32_blake2s_finish(32U, dst, p1);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
   {
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
-    Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, p1);
+    Hacl_Blake2s_128_blake2s_finish(32U, dst, p1);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
   if (scrut.tag == Blake2B_s)
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
-    Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, p1);
+    Hacl_Blake2b_32_blake2b_finish(64U, dst, p1);
     return;
   }
   if (scrut.tag == Blake2B_256_s)
   {
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
-    Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, p1);
+    Hacl_Blake2b_256_blake2b_finish(64U, dst, p1);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -873,7 +867,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)4U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 4U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA1_s)
@@ -889,7 +883,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)5U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 5U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA2_224_s)
@@ -905,7 +899,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA2_256_s)
@@ -921,7 +915,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA2_384_s)
@@ -937,7 +931,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA2_512_s)
@@ -953,7 +947,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_224_s)
@@ -969,7 +963,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_256_s)
@@ -985,7 +979,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_384_s)
@@ -1001,7 +995,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_512_s)
@@ -1017,7 +1011,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == Blake2S_s)
@@ -1027,7 +1021,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2S_s)
     {
       uint32_t *p_dst = scrut.case_Blake2S_s;
-      memcpy(p_dst, p_src, (uint32_t)16U * sizeof (uint32_t));
+      memcpy(p_dst, p_src, 16U * sizeof (uint32_t));
       return;
     }
     if (scrut.tag == Blake2S_128_s)
@@ -1037,7 +1031,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
       Hacl_Blake2s_128_load_state128s_from_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1054,7 +1048,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2B_s)
     {
       uint64_t *p_dst = scrut.case_Blake2B_s;
-      memcpy(p_dst, p_src, (uint32_t)16U * sizeof (uint64_t));
+      memcpy(p_dst, p_src, 16U * sizeof (uint64_t));
       return;
     }
     if (scrut.tag == Blake2B_256_s)
@@ -1064,7 +1058,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
       Hacl_Blake2b_256_load_state256b_from_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1081,7 +1075,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2S_128_s)
     {
       Lib_IntVector_Intrinsics_vec128 *p_dst = scrut.case_Blake2S_128_s;
-      memcpy(p_dst, p_src, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+      memcpy(p_dst, p_src, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
       return;
     }
     if (scrut.tag == Blake2S_s)
@@ -1091,7 +1085,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
       Hacl_Blake2s_128_store_state128s_to_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1108,7 +1102,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2B_256_s)
     {
       Lib_IntVector_Intrinsics_vec256 *p_dst = scrut.case_Blake2B_256_s;
-      memcpy(p_dst, p_src, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+      memcpy(p_dst, p_src, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
       return;
     }
     if (scrut.tag == Blake2B_s)
@@ -1118,7 +1112,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
       Hacl_Blake2b_256_store_state256b_to_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1201,59 +1195,59 @@ static uint32_t block_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)144U;
+        return 144U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)104U;
+        return 104U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)72U;
+        return 72U;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        return (uint32_t)168U;
+        return 168U;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     default:
       {
@@ -1276,7 +1270,7 @@ EverCrypt_Hash_Incremental_hash_state
   uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(block_len(a), sizeof (uint8_t));
   EverCrypt_Hash_state_s *block_state = create_in(a);
   EverCrypt_Hash_Incremental_hash_state
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   EverCrypt_Hash_Incremental_hash_state
   *p =
     (EverCrypt_Hash_Incremental_hash_state *)KRML_HOST_MALLOC(sizeof (
@@ -1296,10 +1290,10 @@ void EverCrypt_Hash_Incremental_init(EverCrypt_Hash_Incremental_hash_state *s)
   uint8_t *buf = scrut.buf;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   Spec_Hash_Definitions_hash_alg i = alg_of_state(block_state);
-  KRML_HOST_IGNORE(i);
+  KRML_MAYBE_UNUSED_VAR(i);
   init(block_state);
   EverCrypt_Hash_Incremental_hash_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -1326,72 +1320,72 @@ EverCrypt_Hash_Incremental_update(
   {
     case Spec_Hash_Definitions_MD5:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     default:
@@ -1408,7 +1402,7 @@ EverCrypt_Hash_Incremental_update(
   else
   {
     uint32_t sz;
-    if (total_len % (uint64_t)block_len(i1) == (uint64_t)0U && total_len > (uint64_t)0U)
+    if (total_len % (uint64_t)block_len(i1) == 0ULL && total_len > 0ULL)
     {
       sz = block_len(i1);
     }
@@ -1423,7 +1417,7 @@ EverCrypt_Hash_Incremental_update(
       uint8_t *buf = s2.buf;
       uint64_t total_len1 = s2.total_len;
       uint32_t sz1;
-      if (total_len1 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+      if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL)
       {
         sz1 = block_len(i1);
       }
@@ -1444,14 +1438,14 @@ EverCrypt_Hash_Incremental_update(
           }
         );
     }
-    else if (sz == (uint32_t)0U)
+    else if (sz == 0U)
     {
       EverCrypt_Hash_Incremental_hash_state s2 = *s;
       EverCrypt_Hash_state_s *block_state1 = s2.block_state;
       uint8_t *buf = s2.buf;
       uint64_t total_len1 = s2.total_len;
       uint32_t sz1;
-      if (total_len1 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+      if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL)
       {
         sz1 = block_len(i1);
       }
@@ -1459,13 +1453,13 @@ EverCrypt_Hash_Incremental_update(
       {
         sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i1));
       }
-      if (!(sz1 == (uint32_t)0U))
+      if (!(sz1 == 0U))
       {
         uint64_t prevlen = total_len1 - (uint64_t)sz1;
         update_multi(block_state1, prevlen, buf, block_len(i1));
       }
       uint32_t ite0;
-      if ((uint64_t)len % (uint64_t)block_len(i1) == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+      if ((uint64_t)len % (uint64_t)block_len(i1) == 0ULL && (uint64_t)len > 0ULL)
       {
         ite0 = block_len(i1);
       }
@@ -1501,7 +1495,7 @@ EverCrypt_Hash_Incremental_update(
       uint8_t *buf0 = s2.buf;
       uint64_t total_len10 = s2.total_len;
       uint32_t sz10;
-      if (total_len10 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len10 > (uint64_t)0U)
+      if (total_len10 % (uint64_t)block_len(i1) == 0ULL && total_len10 > 0ULL)
       {
         sz10 = block_len(i1);
       }
@@ -1526,7 +1520,7 @@ EverCrypt_Hash_Incremental_update(
       uint8_t *buf = s20.buf;
       uint64_t total_len1 = s20.total_len;
       uint32_t sz1;
-      if (total_len1 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+      if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL)
       {
         sz1 = block_len(i1);
       }
@@ -1534,19 +1528,13 @@ EverCrypt_Hash_Incremental_update(
       {
         sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i1));
       }
-      if (!(sz1 == (uint32_t)0U))
+      if (!(sz1 == 0U))
       {
         uint64_t prevlen = total_len1 - (uint64_t)sz1;
         update_multi(block_state1, prevlen, buf, block_len(i1));
       }
       uint32_t ite0;
-      if
-      (
-        (uint64_t)(len - diff)
-        % (uint64_t)block_len(i1)
-        == (uint64_t)0U
-        && (uint64_t)(len - diff) > (uint64_t)0U
-      )
+      if ((uint64_t)(len - diff) % (uint64_t)block_len(i1) == 0ULL && (uint64_t)(len - diff) > 0ULL)
       {
         ite0 = block_len(i1);
       }
@@ -1599,13 +1587,7 @@ static void finish_md5(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_MD5)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_MD5) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_MD5);
   }
@@ -1620,7 +1602,7 @@ static void finish_md5(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_MD5) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_MD5) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_MD5);
   }
@@ -1630,7 +1612,7 @@ static void finish_md5(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1643,13 +1625,7 @@ static void finish_sha1(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA1)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA1) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA1);
   }
@@ -1664,7 +1640,7 @@ static void finish_sha1(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA1) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA1) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA1);
   }
@@ -1674,7 +1650,7 @@ static void finish_sha1(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1688,12 +1664,7 @@ static void finish_sha224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_224)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_224) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_224);
   }
@@ -1708,7 +1679,7 @@ static void finish_sha224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_224) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_224) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_224);
   }
@@ -1718,7 +1689,7 @@ static void finish_sha224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1732,12 +1703,7 @@ static void finish_sha256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_256)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_256) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_256);
   }
@@ -1752,7 +1718,7 @@ static void finish_sha256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_256) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_256) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_256);
   }
@@ -1762,7 +1728,7 @@ static void finish_sha256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1776,12 +1742,7 @@ static void finish_sha3_224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_224)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_224) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_224);
   }
@@ -1796,7 +1757,7 @@ static void finish_sha3_224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_224) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_224) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_224);
   }
@@ -1806,7 +1767,7 @@ static void finish_sha3_224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1820,12 +1781,7 @@ static void finish_sha3_256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_256)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_256) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_256);
   }
@@ -1840,7 +1796,7 @@ static void finish_sha3_256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_256) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_256) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_256);
   }
@@ -1850,7 +1806,7 @@ static void finish_sha3_256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1864,12 +1820,7 @@ static void finish_sha3_384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_384)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_384) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_384);
   }
@@ -1884,7 +1835,7 @@ static void finish_sha3_384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_384) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_384) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_384);
   }
@@ -1894,7 +1845,7 @@ static void finish_sha3_384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1908,12 +1859,7 @@ static void finish_sha3_512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_512)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_512) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_512);
   }
@@ -1928,7 +1874,7 @@ static void finish_sha3_512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_512) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_512) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_512);
   }
@@ -1938,7 +1884,7 @@ static void finish_sha3_512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1952,12 +1898,7 @@ static void finish_sha384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_384)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_384) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_384);
   }
@@ -1972,7 +1913,7 @@ static void finish_sha384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_384) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_384) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_384);
   }
@@ -1982,7 +1923,7 @@ static void finish_sha384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -1996,12 +1937,7 @@ static void finish_sha512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_512)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_512) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_512);
   }
@@ -2016,7 +1952,7 @@ static void finish_sha512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_512) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_512) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_512);
   }
@@ -2026,7 +1962,7 @@ static void finish_sha512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -2039,13 +1975,7 @@ static void finish_blake2s(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_Blake2S)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_Blake2S) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_Blake2S);
   }
@@ -2075,7 +2005,7 @@ static void finish_blake2s(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_Blake2S) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_Blake2S) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_Blake2S);
   }
@@ -2085,7 +2015,7 @@ static void finish_blake2s(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -2098,13 +2028,7 @@ static void finish_blake2b(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_Blake2B)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_Blake2B) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_Blake2B);
   }
@@ -2134,7 +2058,7 @@ static void finish_blake2b(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_Blake2B) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_Blake2B) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_Blake2B);
   }
@@ -2144,7 +2068,7 @@ static void finish_blake2b(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
   finish(&tmp_block_state, dst);
@@ -2258,24 +2182,24 @@ void EverCrypt_Hash_Incremental_hash_256(uint8_t *input, uint32_t input_len, uin
 {
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
     uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
     os[i] = x;);
   uint32_t *s = st;
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -2296,24 +2220,24 @@ static void hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst)
 {
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
     uint32_t x = Hacl_Impl_SHA2_Generic_h224[i];
     os[i] = x;);
   uint32_t *s = st;
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -2403,12 +2327,12 @@ EverCrypt_Hash_Incremental_hash(
         bool vec128 = EverCrypt_AutoConfig2_has_vec128();
         if (vec128)
         {
-          Hacl_Blake2s_128_blake2s((uint32_t)32U, dst, len, input, (uint32_t)0U, NULL);
+          Hacl_Blake2s_128_blake2s(32U, dst, len, input, 0U, NULL);
           return;
         }
-        Hacl_Blake2s_32_blake2s((uint32_t)32U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Blake2s_32_blake2s(32U, dst, len, input, 0U, NULL);
         #else
-        Hacl_Blake2s_32_blake2s((uint32_t)32U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Blake2s_32_blake2s(32U, dst, len, input, 0U, NULL);
         #endif
         break;
       }
@@ -2418,12 +2342,12 @@ EverCrypt_Hash_Incremental_hash(
         bool vec256 = EverCrypt_AutoConfig2_has_vec256();
         if (vec256)
         {
-          Hacl_Blake2b_256_blake2b((uint32_t)64U, dst, len, input, (uint32_t)0U, NULL);
+          Hacl_Blake2b_256_blake2b(64U, dst, len, input, 0U, NULL);
           return;
         }
-        Hacl_Blake2b_32_blake2b((uint32_t)64U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Blake2b_32_blake2b(64U, dst, len, input, 0U, NULL);
         #else
-        Hacl_Blake2b_32_blake2b((uint32_t)64U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Blake2b_32_blake2b(64U, dst, len, input, 0U, NULL);
         #endif
         break;
       }
diff --git a/src/msvc/EverCrypt_Poly1305.c b/src/msvc/EverCrypt_Poly1305.c
index 454c0fce..f9e1e063 100644
--- a/src/msvc/EverCrypt_Poly1305.c
+++ b/src/msvc/EverCrypt_Poly1305.c
@@ -31,30 +31,30 @@
 KRML_MAYBE_UNUSED static void
 poly1305_vale(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key)
 {
-  KRML_HOST_IGNORE(dst);
-  KRML_HOST_IGNORE(src);
-  KRML_HOST_IGNORE(len);
-  KRML_HOST_IGNORE(key);
+  KRML_MAYBE_UNUSED_VAR(dst);
+  KRML_MAYBE_UNUSED_VAR(src);
+  KRML_MAYBE_UNUSED_VAR(len);
+  KRML_MAYBE_UNUSED_VAR(key);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ctx[192U] = { 0U };
-  memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t));
-  uint32_t n_blocks = len / (uint32_t)16U;
-  uint32_t n_extra = len % (uint32_t)16U;
+  memcpy(ctx + 24U, key, 32U * sizeof (uint8_t));
+  uint32_t n_blocks = len / 16U;
+  uint32_t n_extra = len % 16U;
   uint8_t tmp[16U] = { 0U };
-  if (n_extra == (uint32_t)0U)
+  if (n_extra == 0U)
   {
-    KRML_HOST_IGNORE(x64_poly1305(ctx, src, (uint64_t)len, (uint64_t)1U));
+    x64_poly1305(ctx, src, (uint64_t)len, 1ULL);
   }
   else
   {
-    uint32_t len16 = n_blocks * (uint32_t)16U;
+    uint32_t len16 = n_blocks * 16U;
     uint8_t *src16 = src;
     memcpy(tmp, src + len16, n_extra * sizeof (uint8_t));
-    KRML_HOST_IGNORE(x64_poly1305(ctx, src16, (uint64_t)len16, (uint64_t)0U));
-    memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(x64_poly1305(ctx, tmp, (uint64_t)n_extra, (uint64_t)1U));
+    x64_poly1305(ctx, src16, (uint64_t)len16, 0ULL);
+    memcpy(ctx + 24U, key, 32U * sizeof (uint8_t));
+    x64_poly1305(ctx, tmp, (uint64_t)n_extra, 1ULL);
   }
-  memcpy(dst, ctx, (uint32_t)16U * sizeof (uint8_t));
+  memcpy(dst, ctx, 16U * sizeof (uint8_t));
   #endif
 }
 
@@ -65,7 +65,7 @@ void EverCrypt_Poly1305_poly1305(uint8_t *dst, uint8_t *src, uint32_t len, uint8
   #if HACL_CAN_COMPILE_VEC256
   if (vec256)
   {
-    KRML_HOST_IGNORE(vec128);
+    KRML_MAYBE_UNUSED_VAR(vec128);
     Hacl_Poly1305_256_poly1305_mac(dst, len, src, key);
     return;
   }
@@ -73,13 +73,13 @@ void EverCrypt_Poly1305_poly1305(uint8_t *dst, uint8_t *src, uint32_t len, uint8
   #if HACL_CAN_COMPILE_VEC128
   if (vec128)
   {
-    KRML_HOST_IGNORE(vec256);
+    KRML_MAYBE_UNUSED_VAR(vec256);
     Hacl_Poly1305_128_poly1305_mac(dst, len, src, key);
     return;
   }
   #endif
-  KRML_HOST_IGNORE(vec256);
-  KRML_HOST_IGNORE(vec128);
+  KRML_MAYBE_UNUSED_VAR(vec256);
+  KRML_MAYBE_UNUSED_VAR(vec128);
   #if HACL_CAN_COMPILE_VALE
   poly1305_vale(dst, src, len, key);
   #else
diff --git a/src/msvc/Hacl_Bignum.c b/src/msvc/Hacl_Bignum.c
index ca093c6d..b99423f3 100644
--- a/src/msvc/Hacl_Bignum.c
+++ b/src/msvc/Hacl_Bignum.c
@@ -37,12 +37,12 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_mul_u32(aLen, a, aLen, b, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint32_t *a0 = a;
   uint32_t *a1 = a + len2;
   uint32_t *b0 = b;
@@ -52,23 +52,23 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t *tmp_ = tmp + aLen;
   uint32_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a0, a1, tmp_);
   uint32_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint32_t *os = t0;
-    uint32_t x = (((uint32_t)0U - c0) & t0[i]) | (~((uint32_t)0U - c0) & tmp_[i]);
+    uint32_t x = ((0U - c0) & t0[i]) | (~(0U - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c10);
+  KRML_MAYBE_UNUSED_VAR(c10);
   uint32_t c00 = c0;
   uint32_t c010 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b0, b1, tmp_);
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b1, b0, t1);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint32_t *os = t1;
-    uint32_t x = (((uint32_t)0U - c010) & t1[i]) | (~((uint32_t)0U - c010) & tmp_[i]);
+    uint32_t x = ((0U - c010) & t1[i]) | (~(0U - c010) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint32_t c11 = c010;
   uint32_t *t23 = tmp + aLen;
   uint32_t *tmp1 = tmp + aLen + aLen;
@@ -81,66 +81,61 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t *r231 = res + aLen;
   uint32_t *t01 = tmp;
   uint32_t *t231 = tmp + aLen;
-  uint32_t *t45 = tmp + (uint32_t)2U * aLen;
-  uint32_t *t67 = tmp + (uint32_t)3U * aLen;
+  uint32_t *t45 = tmp + 2U * aLen;
+  uint32_t *t67 = tmp + 3U * aLen;
   uint32_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r011, r231, t01);
   uint32_t c_sign = c00 ^ c11;
   uint32_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t67);
   uint32_t c31 = c2 - c3;
   uint32_t c4 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, t01, t231, t45);
   uint32_t c41 = c2 + c4;
-  uint32_t mask = (uint32_t)0U - c_sign;
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  uint32_t mask = 0U - c_sign;
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     uint32_t *os = t45;
     uint32_t x = (mask & t45[i]) | (~mask & t67[i]);
     os[i] = x;
   }
   uint32_t c5 = (mask & c41) | (~mask & c31);
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint32_t *r0 = res + aLen2;
   uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0);
   uint32_t c6 = r10;
   uint32_t c60 = c6;
   uint32_t c7 = c5 + c60;
   uint32_t *r = res + aLen + aLen2;
-  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32((uint32_t)0U, r[0U], c7, r);
+  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32(0U, r[0U], c7, r);
   uint32_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint32_t *a11 = r + (uint32_t)1U;
-    uint32_t *res1 = r + (uint32_t)1U;
+    uint32_t *a11 = r + 1U;
+    uint32_t *res1 = r + 1U;
     uint32_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint32_t t11 = a11[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i0);
-      uint32_t t110 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t110, (uint32_t)0U, res_i1);
-      uint32_t t111 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t111, (uint32_t)0U, res_i2);
-      uint32_t t112 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t112, (uint32_t)0U, res_i);
+      uint32_t t11 = a11[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i0);
+      uint32_t t110 = a11[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t110, 0U, res_i1);
+      uint32_t t111 = a11[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t111, 0U, res_i2);
+      uint32_t t112 = a11[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t112, 0U, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint32_t t11 = a11[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i);
     }
     uint32_t c110 = c;
     r1 = c110;
@@ -152,7 +147,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t c8 = r1;
   uint32_t c = c8;
   uint32_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -164,12 +159,12 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_mul_u64(aLen, a, aLen, b, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint64_t *a0 = a;
   uint64_t *a1 = a + len2;
   uint64_t *b0 = b;
@@ -179,23 +174,23 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t *tmp_ = tmp + aLen;
   uint64_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a0, a1, tmp_);
   uint64_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint64_t *os = t0;
-    uint64_t x = (((uint64_t)0U - c0) & t0[i]) | (~((uint64_t)0U - c0) & tmp_[i]);
+    uint64_t x = ((0ULL - c0) & t0[i]) | (~(0ULL - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c10);
+  KRML_MAYBE_UNUSED_VAR(c10);
   uint64_t c00 = c0;
   uint64_t c010 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b0, b1, tmp_);
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b1, b0, t1);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint64_t *os = t1;
-    uint64_t x = (((uint64_t)0U - c010) & t1[i]) | (~((uint64_t)0U - c010) & tmp_[i]);
+    uint64_t x = ((0ULL - c010) & t1[i]) | (~(0ULL - c010) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint64_t c11 = c010;
   uint64_t *t23 = tmp + aLen;
   uint64_t *tmp1 = tmp + aLen + aLen;
@@ -208,66 +203,61 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t *r231 = res + aLen;
   uint64_t *t01 = tmp;
   uint64_t *t231 = tmp + aLen;
-  uint64_t *t45 = tmp + (uint32_t)2U * aLen;
-  uint64_t *t67 = tmp + (uint32_t)3U * aLen;
+  uint64_t *t45 = tmp + 2U * aLen;
+  uint64_t *t67 = tmp + 3U * aLen;
   uint64_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r011, r231, t01);
   uint64_t c_sign = c00 ^ c11;
   uint64_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t67);
   uint64_t c31 = c2 - c3;
   uint64_t c4 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, t01, t231, t45);
   uint64_t c41 = c2 + c4;
-  uint64_t mask = (uint64_t)0U - c_sign;
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  uint64_t mask = 0ULL - c_sign;
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     uint64_t *os = t45;
     uint64_t x = (mask & t45[i]) | (~mask & t67[i]);
     os[i] = x;
   }
   uint64_t c5 = (mask & c41) | (~mask & c31);
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint64_t *r0 = res + aLen2;
   uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0);
   uint64_t c6 = r10;
   uint64_t c60 = c6;
   uint64_t c7 = c5 + c60;
   uint64_t *r = res + aLen + aLen2;
-  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, r[0U], c7, r);
+  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, r[0U], c7, r);
   uint64_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint64_t *a11 = r + (uint32_t)1U;
-    uint64_t *res1 = r + (uint32_t)1U;
+    uint64_t *a11 = r + 1U;
+    uint64_t *res1 = r + 1U;
     uint64_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint64_t t11 = a11[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i0);
-      uint64_t t110 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t110, (uint64_t)0U, res_i1);
-      uint64_t t111 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t111, (uint64_t)0U, res_i2);
-      uint64_t t112 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t112, (uint64_t)0U, res_i);
+      uint64_t t11 = a11[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i0);
+      uint64_t t110 = a11[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t110, 0ULL, res_i1);
+      uint64_t t111 = a11[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t111, 0ULL, res_i2);
+      uint64_t t112 = a11[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t112, 0ULL, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint64_t t11 = a11[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i);
     }
     uint64_t c110 = c;
     r1 = c110;
@@ -279,7 +269,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t c8 = r1;
   uint64_t c = c8;
   uint64_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -290,27 +280,27 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(
   uint32_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_sqr_u32(aLen, a, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint32_t *a0 = a;
   uint32_t *a1 = a + len2;
   uint32_t *t0 = tmp;
   uint32_t *tmp_ = tmp + aLen;
   uint32_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a0, a1, tmp_);
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint32_t *os = t0;
-    uint32_t x = (((uint32_t)0U - c0) & t0[i]) | (~((uint32_t)0U - c0) & tmp_[i]);
+    uint32_t x = ((0U - c0) & t0[i]) | (~(0U - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint32_t c00 = c0;
-  KRML_HOST_IGNORE(c00);
+  KRML_MAYBE_UNUSED_VAR(c00);
   uint32_t *t23 = tmp + aLen;
   uint32_t *tmp1 = tmp + aLen + aLen;
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len2, t0, tmp1, t23);
@@ -322,54 +312,49 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(
   uint32_t *r231 = res + aLen;
   uint32_t *t01 = tmp;
   uint32_t *t231 = tmp + aLen;
-  uint32_t *t45 = tmp + (uint32_t)2U * aLen;
+  uint32_t *t45 = tmp + 2U * aLen;
   uint32_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r011, r231, t01);
   uint32_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t45);
   uint32_t c5 = c2 - c3;
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint32_t *r0 = res + aLen2;
   uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0);
   uint32_t c4 = r10;
   uint32_t c6 = c4;
   uint32_t c7 = c5 + c6;
   uint32_t *r = res + aLen + aLen2;
-  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32((uint32_t)0U, r[0U], c7, r);
+  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32(0U, r[0U], c7, r);
   uint32_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint32_t *a11 = r + (uint32_t)1U;
-    uint32_t *res1 = r + (uint32_t)1U;
+    uint32_t *a11 = r + 1U;
+    uint32_t *res1 = r + 1U;
     uint32_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint32_t t1 = a11[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a11[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a11[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a11[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a11[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, 0U, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint32_t t1 = a11[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, 0U, res_i);
     }
     uint32_t c10 = c;
     r1 = c10;
@@ -381,7 +366,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(
   uint32_t c8 = r1;
   uint32_t c = c8;
   uint32_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -392,27 +377,27 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(
   uint64_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_sqr_u64(aLen, a, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint64_t *a0 = a;
   uint64_t *a1 = a + len2;
   uint64_t *t0 = tmp;
   uint64_t *tmp_ = tmp + aLen;
   uint64_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a0, a1, tmp_);
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint64_t *os = t0;
-    uint64_t x = (((uint64_t)0U - c0) & t0[i]) | (~((uint64_t)0U - c0) & tmp_[i]);
+    uint64_t x = ((0ULL - c0) & t0[i]) | (~(0ULL - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint64_t c00 = c0;
-  KRML_HOST_IGNORE(c00);
+  KRML_MAYBE_UNUSED_VAR(c00);
   uint64_t *t23 = tmp + aLen;
   uint64_t *tmp1 = tmp + aLen + aLen;
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len2, t0, tmp1, t23);
@@ -424,54 +409,49 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(
   uint64_t *r231 = res + aLen;
   uint64_t *t01 = tmp;
   uint64_t *t231 = tmp + aLen;
-  uint64_t *t45 = tmp + (uint32_t)2U * aLen;
+  uint64_t *t45 = tmp + 2U * aLen;
   uint64_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r011, r231, t01);
   uint64_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t45);
   uint64_t c5 = c2 - c3;
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint64_t *r0 = res + aLen2;
   uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0);
   uint64_t c4 = r10;
   uint64_t c6 = c4;
   uint64_t c7 = c5 + c6;
   uint64_t *r = res + aLen + aLen2;
-  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, r[0U], c7, r);
+  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, r[0U], c7, r);
   uint64_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint64_t *a11 = r + (uint32_t)1U;
-    uint64_t *res1 = r + (uint32_t)1U;
+    uint64_t *a11 = r + 1U;
+    uint64_t *res1 = r + 1U;
     uint64_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint64_t t1 = a11[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a11[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a11[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a11[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a11[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, 0ULL, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint64_t t1 = a11[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c10 = c;
     r1 = c10;
@@ -483,7 +463,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(
   uint64_t c8 = r1;
   uint64_t c = c8;
   uint64_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -495,27 +475,27 @@ Hacl_Bignum_bn_add_mod_n_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -526,27 +506,27 @@ Hacl_Bignum_bn_add_mod_n_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t *tmp = (uint32_t *)alloca(len1 * sizeof (uint32_t));
   memset(tmp, 0U, len1 * sizeof (uint32_t));
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = res[i];
     uint32_t t2 = n[i];
@@ -555,7 +535,7 @@ Hacl_Bignum_bn_add_mod_n_u32(
   }
   uint32_t c1 = c;
   uint32_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -572,27 +552,27 @@ Hacl_Bignum_bn_add_mod_n_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -603,27 +583,27 @@ Hacl_Bignum_bn_add_mod_n_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t *tmp = (uint64_t *)alloca(len1 * sizeof (uint64_t));
   memset(tmp, 0U, len1 * sizeof (uint64_t));
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = res[i];
     uint64_t t2 = n[i];
@@ -632,7 +612,7 @@ Hacl_Bignum_bn_add_mod_n_u64(
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -649,27 +629,27 @@ Hacl_Bignum_bn_sub_mod_n_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -680,27 +660,27 @@ Hacl_Bignum_bn_sub_mod_n_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t *tmp = (uint32_t *)alloca(len1 * sizeof (uint32_t));
   memset(tmp, 0U, len1 * sizeof (uint32_t));
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = res[i];
     uint32_t t2 = n[i];
@@ -708,9 +688,9 @@ Hacl_Bignum_bn_sub_mod_n_u32(
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t2, res_i);
   }
   uint32_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint32_t c2 = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t c2 = 0U - c00;
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -727,27 +707,27 @@ Hacl_Bignum_bn_sub_mod_n_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -758,27 +738,27 @@ Hacl_Bignum_bn_sub_mod_n_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t *tmp = (uint64_t *)alloca(len1 * sizeof (uint64_t));
   memset(tmp, 0U, len1 * sizeof (uint64_t));
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = res[i];
     uint64_t t2 = n[i];
@@ -786,9 +766,9 @@ Hacl_Bignum_bn_sub_mod_n_u64(
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -798,42 +778,42 @@ Hacl_Bignum_bn_sub_mod_n_u64(
 
 uint32_t Hacl_Bignum_ModInvLimb_mod_inv_uint32(uint32_t n0)
 {
-  uint32_t alpha = (uint32_t)2147483648U;
+  uint32_t alpha = 2147483648U;
   uint32_t beta = n0;
-  uint32_t ub = (uint32_t)0U;
-  uint32_t vb = (uint32_t)0U;
-  ub = (uint32_t)1U;
-  vb = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t ub = 0U;
+  uint32_t vb = 0U;
+  ub = 1U;
+  vb = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint32_t us = ub;
     uint32_t vs = vb;
-    uint32_t u_is_odd = (uint32_t)0U - (us & (uint32_t)1U);
+    uint32_t u_is_odd = 0U - (us & 1U);
     uint32_t beta_if_u_is_odd = beta & u_is_odd;
-    ub = ((us ^ beta_if_u_is_odd) >> (uint32_t)1U) + (us & beta_if_u_is_odd);
+    ub = ((us ^ beta_if_u_is_odd) >> 1U) + (us & beta_if_u_is_odd);
     uint32_t alpha_if_u_is_odd = alpha & u_is_odd;
-    vb = (vs >> (uint32_t)1U) + alpha_if_u_is_odd;
+    vb = (vs >> 1U) + alpha_if_u_is_odd;
   }
   return vb;
 }
 
 uint64_t Hacl_Bignum_ModInvLimb_mod_inv_uint64(uint64_t n0)
 {
-  uint64_t alpha = (uint64_t)9223372036854775808U;
+  uint64_t alpha = 9223372036854775808ULL;
   uint64_t beta = n0;
-  uint64_t ub = (uint64_t)0U;
-  uint64_t vb = (uint64_t)0U;
-  ub = (uint64_t)1U;
-  vb = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t ub = 0ULL;
+  uint64_t vb = 0ULL;
+  ub = 1ULL;
+  vb = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t us = ub;
     uint64_t vs = vb;
-    uint64_t u_is_odd = (uint64_t)0U - (us & (uint64_t)1U);
+    uint64_t u_is_odd = 0ULL - (us & 1ULL);
     uint64_t beta_if_u_is_odd = beta & u_is_odd;
-    ub = ((us ^ beta_if_u_is_odd) >> (uint32_t)1U) + (us & beta_if_u_is_odd);
+    ub = ((us ^ beta_if_u_is_odd) >> 1U) + (us & beta_if_u_is_odd);
     uint64_t alpha_if_u_is_odd = alpha & u_is_odd;
-    vb = (vs >> (uint32_t)1U) + alpha_if_u_is_odd;
+    vb = (vs >> 1U) + alpha_if_u_is_odd;
   }
   return vb;
 }
@@ -844,15 +824,15 @@ uint32_t Hacl_Bignum_Montgomery_bn_check_modulus_u32(uint32_t len, uint32_t *n)
   uint32_t *one = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc;
   return m0 & m1;
@@ -867,46 +847,40 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(
 )
 {
   memset(res, 0U, len * sizeof (uint32_t));
-  uint32_t i = nBits / (uint32_t)32U;
-  uint32_t j = nBits % (uint32_t)32U;
-  res[i] = res[i] | (uint32_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U * len - nBits; i0++)
+  uint32_t i = nBits / 32U;
+  uint32_t j = nBits % 32U;
+  res[i] = res[i] | 1U << j;
+  for (uint32_t i0 = 0U; i0 < 64U * len - nBits; i0++)
   {
     Hacl_Bignum_bn_add_mod_n_u32(len, n, res, res, res);
   }
 }
 
-void
-Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
-  uint32_t len,
-  uint32_t *n,
-  uint32_t nInv,
-  uint32_t *c,
-  uint32_t *res
-)
+static void
+bn_mont_reduction_u32(uint32_t len, uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint32_t a_i = n[i];
       uint32_t *res_i = res_j0 + i;
@@ -923,27 +897,27 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *tmp = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(tmp, 0U, len * sizeof (uint32_t));
-  uint32_t c1 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+  uint32_t c1 = 0U;
+  for (uint32_t i = 0U; i < len / 4U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i);
   }
-  for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+  for (uint32_t i = len / 4U * 4U; i < len; i++)
   {
     uint32_t t1 = res[i];
     uint32_t t2 = n[i];
@@ -952,7 +926,7 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
   }
   uint32_t c10 = c1;
   uint32_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -973,11 +947,11 @@ Hacl_Bignum_Montgomery_bn_to_mont_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv, c, aM);
+  bn_mont_reduction_u32(len, n, nInv, c, aM);
 }
 
 void
@@ -993,7 +967,7 @@ Hacl_Bignum_Montgomery_bn_from_mont_u32(
   uint32_t *tmp = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(tmp, 0U, (len + len) * sizeof (uint32_t));
   memcpy(tmp, aM, len * sizeof (uint32_t));
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, tmp, a);
+  bn_mont_reduction_u32(len, n, nInv_u64, tmp, a);
 }
 
 void
@@ -1009,11 +983,11 @@ Hacl_Bignum_Montgomery_bn_mont_mul_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, aM, bM, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, c, resM);
+  bn_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
 
 void
@@ -1028,11 +1002,11 @@ Hacl_Bignum_Montgomery_bn_mont_sqr_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, aM, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, c, resM);
+  bn_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
 
 uint64_t Hacl_Bignum_Montgomery_bn_check_modulus_u64(uint32_t len, uint64_t *n)
@@ -1041,15 +1015,15 @@ uint64_t Hacl_Bignum_Montgomery_bn_check_modulus_u64(uint32_t len, uint64_t *n)
   uint64_t *one = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   return m0 & m1;
@@ -1064,46 +1038,40 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(
 )
 {
   memset(res, 0U, len * sizeof (uint64_t));
-  uint32_t i = nBits / (uint32_t)64U;
-  uint32_t j = nBits % (uint32_t)64U;
-  res[i] = res[i] | (uint64_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U * len - nBits; i0++)
+  uint32_t i = nBits / 64U;
+  uint32_t j = nBits % 64U;
+  res[i] = res[i] | 1ULL << j;
+  for (uint32_t i0 = 0U; i0 < 128U * len - nBits; i0++)
   {
     Hacl_Bignum_bn_add_mod_n_u64(len, n, res, res, res);
   }
 }
 
-void
-Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
-  uint32_t len,
-  uint64_t *n,
-  uint64_t nInv,
-  uint64_t *c,
-  uint64_t *res
-)
+static void
+bn_mont_reduction_u64(uint32_t len, uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c1 = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = n[i];
       uint64_t *res_i = res_j0 + i;
@@ -1120,27 +1088,27 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *tmp = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(tmp, 0U, len * sizeof (uint64_t));
-  uint64_t c1 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+  uint64_t c1 = 0ULL;
+  for (uint32_t i = 0U; i < len / 4U; i++)
   {
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i);
   }
-  for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+  for (uint32_t i = len / 4U * 4U; i < len; i++)
   {
     uint64_t t1 = res[i];
     uint64_t t2 = n[i];
@@ -1149,7 +1117,7 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
   }
   uint64_t c10 = c1;
   uint64_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -1170,11 +1138,11 @@ Hacl_Bignum_Montgomery_bn_to_mont_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv, c, aM);
+  bn_mont_reduction_u64(len, n, nInv, c, aM);
 }
 
 void
@@ -1190,7 +1158,7 @@ Hacl_Bignum_Montgomery_bn_from_mont_u64(
   uint64_t *tmp = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(tmp, 0U, (len + len) * sizeof (uint64_t));
   memcpy(tmp, aM, len * sizeof (uint64_t));
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, tmp, a);
+  bn_mont_reduction_u64(len, n, nInv_u64, tmp, a);
 }
 
 void
@@ -1206,11 +1174,11 @@ Hacl_Bignum_Montgomery_bn_mont_mul_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, aM, bM, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, c, resM);
+  bn_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
 
 void
@@ -1225,15 +1193,15 @@ Hacl_Bignum_Montgomery_bn_mont_sqr_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, aM, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, c, resM);
+  bn_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
 
-static void
-bn_almost_mont_reduction_u32(
+void
+Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32(
   uint32_t len,
   uint32_t *n,
   uint32_t nInv,
@@ -1241,28 +1209,28 @@ bn_almost_mont_reduction_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint32_t a_i = n[i];
       uint32_t *res_i = res_j0 + i;
@@ -1280,9 +1248,9 @@ bn_almost_mont_reduction_u32(
   uint32_t *tmp = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(tmp, 0U, len * sizeof (uint32_t));
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len, res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t *os = res;
     uint32_t x = (m & tmp[i]) | (~m & res[i]);
@@ -1303,11 +1271,11 @@ bn_almost_mont_mul_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, aM, bM, tmp, c);
-  bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM);
+  Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
 
 static void
@@ -1322,15 +1290,15 @@ bn_almost_mont_sqr_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, aM, tmp, c);
-  bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM);
+  Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
 
-static void
-bn_almost_mont_reduction_u64(
+void
+Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64(
   uint32_t len,
   uint64_t *n,
   uint64_t nInv,
@@ -1338,28 +1306,28 @@ bn_almost_mont_reduction_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c1 = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = n[i];
       uint64_t *res_i = res_j0 + i;
@@ -1377,9 +1345,9 @@ bn_almost_mont_reduction_u64(
   uint64_t *tmp = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(tmp, 0U, len * sizeof (uint64_t));
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len, res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t *os = res;
     uint64_t x = (m & tmp[i]) | (~m & res[i]);
@@ -1400,11 +1368,11 @@ bn_almost_mont_mul_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, aM, bM, tmp, c);
-  bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM);
+  Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
 
 static void
@@ -1419,11 +1387,11 @@ bn_almost_mont_sqr_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, aM, tmp, c);
-  bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM);
+  Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
 
 uint32_t
@@ -1439,56 +1407,56 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(
   uint32_t *one = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m10 = acc0;
   uint32_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t m1;
-  if (bBits < (uint32_t)32U * bLen)
+  if (bBits < 32U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), bLen);
     uint32_t *b2 = (uint32_t *)alloca(bLen * sizeof (uint32_t));
     memset(b2, 0U, bLen * sizeof (uint32_t));
-    uint32_t i0 = bBits / (uint32_t)32U;
-    uint32_t j = bBits % (uint32_t)32U;
-    b2[i0] = b2[i0] | (uint32_t)1U << j;
-    uint32_t acc = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 32U;
+    uint32_t j = bBits % 32U;
+    b2[i0] = b2[i0] | 1U << j;
+    uint32_t acc = 0U;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]);
       uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
     }
     uint32_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint32_t)0xFFFFFFFFU;
+    m1 = 0xFFFFFFFFU;
   }
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t m = m1 & m2;
@@ -1507,19 +1475,12 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t *aM = (uint32_t *)alloca(len * sizeof (uint32_t));
     memset(aM, 0U, len * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-    uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
-    memset(c, 0U, (len + len) * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-    uint32_t *tmp0 = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
-    Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c);
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM);
+    Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a, aM);
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t *resM = (uint32_t *)alloca(len * sizeof (uint32_t));
     memset(resM, 0U, len * sizeof (uint32_t));
@@ -1531,13 +1492,13 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         bn_almost_mont_mul_u32(len, ctx_n0, mu, resM, aM, resM);
@@ -1545,44 +1506,33 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
       uint32_t *ctx_n0 = ctx;
       bn_almost_mont_sqr_u32(len, ctx_n0, mu, aM, aM);
     }
-    KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-    uint32_t *tmp = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
-    memset(tmp, 0U, (len + len) * sizeof (uint32_t));
-    memcpy(tmp, resM, len * sizeof (uint32_t));
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, tmp, res);
+    Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res);
     return;
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *aM = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(aM, 0U, len * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-  uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
-  memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp0 = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM);
+  Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a, aM);
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *resM = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(resM, 0U, len * sizeof (uint32_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *ctx = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(ctx, 0U, (len + len) * sizeof (uint32_t));
   memcpy(ctx, n, len * sizeof (uint32_t));
   memcpy(ctx + len, r2, len * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len);
-  uint32_t *table = (uint32_t *)alloca((uint32_t)16U * len * sizeof (uint32_t));
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len);
+  uint32_t *table = (uint32_t *)alloca(16U * len * sizeof (uint32_t));
+  memset(table, 0U, 16U * len * sizeof (uint32_t));
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *tmp = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(tmp, 0U, len * sizeof (uint32_t));
@@ -1593,21 +1543,21 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
   Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * len;
     uint32_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * len;
     uint32_t *ctx_n = ctx;
     bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
     uint32_t bits_l32 = bits_c;
     const uint32_t *a_bits_l = table + bits_l32 * len;
     memcpy(resM, (uint32_t *)a_bits_l, len * sizeof (uint32_t));
@@ -1619,29 +1569,25 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM);
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
-  uint32_t *tmp1 = (uint32_t *)alloca(len * sizeof (uint32_t));
-  memset(tmp1, 0U, len * sizeof (uint32_t));
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  uint32_t *tmp0 = (uint32_t *)alloca(len * sizeof (uint32_t));
+  memset(tmp0, 0U, len * sizeof (uint32_t));
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
     uint32_t bits_l32 = bits_l;
     const uint32_t *a_bits_l = table + bits_l32 * len;
-    memcpy(tmp1, (uint32_t *)a_bits_l, len * sizeof (uint32_t));
+    memcpy(tmp0, (uint32_t *)a_bits_l, len * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
-    bn_almost_mont_mul_u32(len, ctx_n, mu, resM, tmp1, resM);
+    bn_almost_mont_mul_u32(len, ctx_n, mu, resM, tmp0, resM);
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-  uint32_t *tmp2 = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
-  memset(tmp2, 0U, (len + len) * sizeof (uint32_t));
-  memcpy(tmp2, resM, len * sizeof (uint32_t));
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, tmp2, res);
+  Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res);
 }
 
 void
@@ -1656,19 +1602,12 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t *aM = (uint32_t *)alloca(len * sizeof (uint32_t));
     memset(aM, 0U, len * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-    uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
-    memset(c, 0U, (len + len) * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-    uint32_t *tmp0 = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
-    Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c);
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM);
+    Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a, aM);
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t *resM = (uint32_t *)alloca(len * sizeof (uint32_t));
     memset(resM, 0U, len * sizeof (uint32_t));
@@ -1677,20 +1616,20 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
     memset(ctx, 0U, (len + len) * sizeof (uint32_t));
     memcpy(ctx, n, len * sizeof (uint32_t));
     memcpy(ctx + len, r2, len * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      for (uint32_t i = 0U; i < len; i++)
       {
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -1701,50 +1640,39 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
       sw = bit;
     }
     uint32_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len; i++)
+    for (uint32_t i = 0U; i < len; i++)
     {
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
-    KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-    uint32_t *tmp = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
-    memset(tmp, 0U, (len + len) * sizeof (uint32_t));
-    memcpy(tmp, resM, len * sizeof (uint32_t));
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, tmp, res);
+    Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res);
     return;
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *aM = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(aM, 0U, len * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-  uint32_t *c0 = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
-  memset(c0, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp0 = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c0);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c0, aM);
+  Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a, aM);
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *resM = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(resM, 0U, len * sizeof (uint32_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *ctx = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(ctx, 0U, (len + len) * sizeof (uint32_t));
   memcpy(ctx, n, len * sizeof (uint32_t));
   memcpy(ctx + len, r2, len * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len);
-  uint32_t *table = (uint32_t *)alloca((uint32_t)16U * len * sizeof (uint32_t));
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len);
+  uint32_t *table = (uint32_t *)alloca(16U * len * sizeof (uint32_t));
+  memset(table, 0U, 16U * len * sizeof (uint32_t));
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *tmp = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(tmp, 0U, len * sizeof (uint32_t));
@@ -1755,29 +1683,29 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
   Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * len;
     uint32_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * len;
     uint32_t *ctx_n = ctx;
     bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint32_t *)(table + (uint32_t)0U * len), len * sizeof (uint32_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+    memcpy(resM, (uint32_t *)(table + 0U * len), len * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
         uint32_t *os = resM;
         uint32_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -1791,39 +1719,35 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM);
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
-  uint32_t *tmp1 = (uint32_t *)alloca(len * sizeof (uint32_t));
-  memset(tmp1, 0U, len * sizeof (uint32_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  uint32_t *tmp0 = (uint32_t *)alloca(len * sizeof (uint32_t));
+  memset(tmp0, 0U, len * sizeof (uint32_t));
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp1, (uint32_t *)(table + (uint32_t)0U * len), len * sizeof (uint32_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
+    memcpy(tmp0, (uint32_t *)(table + 0U * len), len * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
-        uint32_t *os = tmp1;
-        uint32_t x = (c & res_j[i]) | (~c & tmp1[i]);
+        uint32_t *os = tmp0;
+        uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;
       });
     uint32_t *ctx_n = ctx;
-    bn_almost_mont_mul_u32(len, ctx_n, mu, resM, tmp1, resM);
+    bn_almost_mont_mul_u32(len, ctx_n, mu, resM, tmp0, resM);
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-  uint32_t *tmp2 = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
-  memset(tmp2, 0U, (len + len) * sizeof (uint32_t));
-  memcpy(tmp2, resM, len * sizeof (uint32_t));
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, tmp2, res);
+  Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res);
 }
 
 void
@@ -1877,56 +1801,56 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(
   uint64_t *one = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m10 = acc0;
   uint64_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t m1;
-  if (bBits < (uint32_t)64U * bLen)
+  if (bBits < 64U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
     uint64_t *b2 = (uint64_t *)alloca(bLen * sizeof (uint64_t));
     memset(b2, 0U, bLen * sizeof (uint64_t));
-    uint32_t i0 = bBits / (uint32_t)64U;
-    uint32_t j = bBits % (uint32_t)64U;
-    b2[i0] = b2[i0] | (uint64_t)1U << j;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 64U;
+    uint32_t j = bBits % 64U;
+    b2[i0] = b2[i0] | 1ULL << j;
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    m1 = 0xFFFFFFFFFFFFFFFFULL;
   }
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t m = m1 & m2;
@@ -1945,19 +1869,12 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t *aM = (uint64_t *)alloca(len * sizeof (uint64_t));
     memset(aM, 0U, len * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-    uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
-    memset(c, 0U, (len + len) * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-    uint64_t *tmp0 = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
-    Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c);
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM);
+    Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a, aM);
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t *resM = (uint64_t *)alloca(len * sizeof (uint64_t));
     memset(resM, 0U, len * sizeof (uint64_t));
@@ -1969,13 +1886,13 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         bn_almost_mont_mul_u64(len, ctx_n0, mu, resM, aM, resM);
@@ -1983,44 +1900,33 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
       uint64_t *ctx_n0 = ctx;
       bn_almost_mont_sqr_u64(len, ctx_n0, mu, aM, aM);
     }
-    KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-    uint64_t *tmp = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
-    memset(tmp, 0U, (len + len) * sizeof (uint64_t));
-    memcpy(tmp, resM, len * sizeof (uint64_t));
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, tmp, res);
+    Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res);
     return;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *aM = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(aM, 0U, len * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-  uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
-  memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp0 = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM);
+  Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a, aM);
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *resM = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(resM, 0U, len * sizeof (uint64_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *ctx = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(ctx, 0U, (len + len) * sizeof (uint64_t));
   memcpy(ctx, n, len * sizeof (uint64_t));
   memcpy(ctx + len, r2, len * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len);
-  uint64_t *table = (uint64_t *)alloca((uint32_t)16U * len * sizeof (uint64_t));
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len);
+  uint64_t *table = (uint64_t *)alloca(16U * len * sizeof (uint64_t));
+  memset(table, 0U, 16U * len * sizeof (uint64_t));
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *tmp = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(tmp, 0U, len * sizeof (uint64_t));
@@ -2031,21 +1937,21 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
   Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * len;
     uint64_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * len;
     uint64_t *ctx_n = ctx;
     bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
     uint32_t bits_l32 = (uint32_t)bits_c;
     const uint64_t *a_bits_l = table + bits_l32 * len;
     memcpy(resM, (uint64_t *)a_bits_l, len * sizeof (uint64_t));
@@ -2057,29 +1963,25 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM);
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
-  uint64_t *tmp1 = (uint64_t *)alloca(len * sizeof (uint64_t));
-  memset(tmp1, 0U, len * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  uint64_t *tmp0 = (uint64_t *)alloca(len * sizeof (uint64_t));
+  memset(tmp0, 0U, len * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
     uint32_t bits_l32 = (uint32_t)bits_l;
     const uint64_t *a_bits_l = table + bits_l32 * len;
-    memcpy(tmp1, (uint64_t *)a_bits_l, len * sizeof (uint64_t));
+    memcpy(tmp0, (uint64_t *)a_bits_l, len * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
-    bn_almost_mont_mul_u64(len, ctx_n, mu, resM, tmp1, resM);
+    bn_almost_mont_mul_u64(len, ctx_n, mu, resM, tmp0, resM);
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-  uint64_t *tmp2 = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
-  memset(tmp2, 0U, (len + len) * sizeof (uint64_t));
-  memcpy(tmp2, resM, len * sizeof (uint64_t));
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, tmp2, res);
+  Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res);
 }
 
 void
@@ -2094,19 +1996,12 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t *aM = (uint64_t *)alloca(len * sizeof (uint64_t));
     memset(aM, 0U, len * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-    uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
-    memset(c, 0U, (len + len) * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-    uint64_t *tmp0 = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
-    Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c);
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM);
+    Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a, aM);
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t *resM = (uint64_t *)alloca(len * sizeof (uint64_t));
     memset(resM, 0U, len * sizeof (uint64_t));
@@ -2115,20 +2010,20 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
     memset(ctx, 0U, (len + len) * sizeof (uint64_t));
     memcpy(ctx, n, len * sizeof (uint64_t));
     memcpy(ctx + len, r2, len * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      for (uint32_t i = 0U; i < len; i++)
       {
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -2139,50 +2034,39 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
       sw = bit;
     }
     uint64_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len; i++)
+    for (uint32_t i = 0U; i < len; i++)
     {
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
-    KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-    uint64_t *tmp = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
-    memset(tmp, 0U, (len + len) * sizeof (uint64_t));
-    memcpy(tmp, resM, len * sizeof (uint64_t));
-    Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, tmp, res);
+    Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res);
     return;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *aM = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(aM, 0U, len * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-  uint64_t *c0 = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
-  memset(c0, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp0 = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c0);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c0, aM);
+  Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a, aM);
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *resM = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(resM, 0U, len * sizeof (uint64_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *ctx = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(ctx, 0U, (len + len) * sizeof (uint64_t));
   memcpy(ctx, n, len * sizeof (uint64_t));
   memcpy(ctx + len, r2, len * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len);
-  uint64_t *table = (uint64_t *)alloca((uint32_t)16U * len * sizeof (uint64_t));
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len);
+  uint64_t *table = (uint64_t *)alloca(16U * len * sizeof (uint64_t));
+  memset(table, 0U, 16U * len * sizeof (uint64_t));
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *tmp = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(tmp, 0U, len * sizeof (uint64_t));
@@ -2193,29 +2077,29 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
   Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * len;
     uint64_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * len;
     uint64_t *ctx_n = ctx;
     bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint64_t *)(table + (uint32_t)0U * len), len * sizeof (uint64_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+    memcpy(resM, (uint64_t *)(table + 0U * len), len * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
         uint64_t *os = resM;
         uint64_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -2229,39 +2113,35 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM);
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
-  uint64_t *tmp1 = (uint64_t *)alloca(len * sizeof (uint64_t));
-  memset(tmp1, 0U, len * sizeof (uint64_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  uint64_t *tmp0 = (uint64_t *)alloca(len * sizeof (uint64_t));
+  memset(tmp0, 0U, len * sizeof (uint64_t));
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp1, (uint64_t *)(table + (uint32_t)0U * len), len * sizeof (uint64_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
+    memcpy(tmp0, (uint64_t *)(table + 0U * len), len * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
-        uint64_t *os = tmp1;
-        uint64_t x = (c & res_j[i]) | (~c & tmp1[i]);
+        uint64_t *os = tmp0;
+        uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;
       });
     uint64_t *ctx_n = ctx;
-    bn_almost_mont_mul_u64(len, ctx_n, mu, resM, tmp1, resM);
+    bn_almost_mont_mul_u64(len, ctx_n, mu, resM, tmp0, resM);
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-  uint64_t *tmp2 = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
-  memset(tmp2, 0U, (len + len) * sizeof (uint64_t));
-  memcpy(tmp2, resM, len * sizeof (uint64_t));
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, tmp2, res);
+  Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res);
 }
 
 void
diff --git a/src/msvc/Hacl_Bignum256.c b/src/msvc/Hacl_Bignum256.c
index b516e70d..a4f00b83 100644
--- a/src/msvc/Hacl_Bignum256.c
+++ b/src/msvc/Hacl_Bignum256.c
@@ -60,23 +60,23 @@ Write `a + b mod 2^256` in `res`.
 */
 uint64_t Hacl_Bignum256_add(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   return c;
@@ -91,23 +91,23 @@ Write `a - b mod 2^256` in `res`.
 */
 uint64_t Hacl_Bignum256_sub(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   return c;
@@ -125,52 +125,52 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum256_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -188,53 +188,53 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum256_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x;);
@@ -248,30 +248,30 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum256_mul(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = b[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = a[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = a[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
     uint64_t r = c;
-    res[(uint32_t)4U + i0] = r;);
+    res[4U + i0] = r;);
 }
 
 /**
@@ -282,31 +282,31 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = a;
     uint64_t a_j = a[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -314,29 +314,29 @@ void Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res)
     }
     uint64_t r = c;
     res[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
-  uint32_t i = nBits / (uint32_t)64U;
-  uint32_t j = nBits % (uint32_t)64U;
-  res[i] = res[i] | (uint64_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)512U - nBits; i0++)
+  memset(res, 0U, 4U * sizeof (uint64_t));
+  uint32_t i = nBits / 64U;
+  uint32_t j = nBits % 64U;
+  res[i] = res[i] | 1ULL << j;
+  for (uint32_t i0 = 0U; i0 < 512U - nBits; i0++)
   {
     Hacl_Bignum256_add_mod(n, res, res, res);
   }
@@ -344,112 +344,119 @@ static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 
 static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)4U + i0;
-    uint64_t res_j = c[(uint32_t)4U + i0];
+    uint64_t *resb = c + 4U + i0;
+    uint64_t res_j = c[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, c + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c1 = (uint64_t)0U;
+  uint64_t c1 = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i);
   }
   uint64_t c10 = c1;
   uint64_t c2 = c00 - c10;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
 }
 
+static inline void to(uint64_t *n, uint64_t nInv, uint64_t *r2, uint64_t *a, uint64_t *aM)
+{
+  uint64_t c[8U] = { 0U };
+  Hacl_Bignum256_mul(a, r2, c);
+  reduction(n, nInv, c, aM);
+}
+
 static inline void from(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *a)
 {
   uint64_t tmp[8U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, aM, 4U * sizeof (uint64_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)4U + i0;
-    uint64_t res_j = c[(uint32_t)4U + i0];
+    uint64_t *resb = c + 4U + i0;
+    uint64_t res_j = c[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, c + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
   uint64_t c1 = Hacl_Bignum256_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (m & tmp[i]) | (~m & res[i]);
     os[i] = x;);
@@ -459,82 +466,14 @@ static inline void
 amont_mul(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *bM, uint64_t *resM)
 {
   uint64_t c[8U] = { 0U };
-  memset(c, 0U, (uint32_t)8U * sizeof (uint64_t));
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t bj = bM[i0];
-    uint64_t *res_j = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    {
-      uint64_t a_i = aM[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c1, res_i0);
-      uint64_t a_i0 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c1, res_i1);
-      uint64_t a_i1 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c1, res_i2);
-      uint64_t a_i2 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c1, res_i);
-    }
-    uint64_t r = c1;
-    c[(uint32_t)4U + i0] = r;);
+  Hacl_Bignum256_mul(aM, bM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
 static inline void amont_sqr(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *resM)
 {
   uint64_t c[8U] = { 0U };
-  memset(c, 0U, (uint32_t)8U * sizeof (uint64_t));
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *ab = aM;
-    uint64_t a_j = aM[i0];
-    uint64_t *res_j = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
-    {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c1, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c1, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c1, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c1, res_i);
-    }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
-    {
-      uint64_t a_i = ab[i];
-      uint64_t *res_i = res_j + i;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c1, res_i);
-    }
-    uint64_t r = c1;
-    c[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, c, c, c);
-  KRML_HOST_IGNORE(c0);
-  uint64_t tmp[8U] = { 0U };
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    FStar_UInt128_uint128 res = FStar_UInt128_mul_wide(aM[i], aM[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
-    uint64_t lo = FStar_UInt128_uint128_to_uint64(res);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, c, tmp, c);
-  KRML_HOST_IGNORE(c1);
+  Hacl_Bignum256_sqr(aM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -543,50 +482,9 @@ bn_slow_precomp(uint64_t *n, uint64_t mu, uint64_t *r2, uint64_t *a, uint64_t *r
 {
   uint64_t a_mod[4U] = { 0U };
   uint64_t a1[8U] = { 0U };
-  memcpy(a1, a, (uint32_t)8U * sizeof (uint64_t));
-  uint64_t c0 = (uint64_t)0U;
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t qj = mu * a1[i0];
-    uint64_t *res_j0 = a1 + i0;
-    uint64_t c = (uint64_t)0U;
-    {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
-    }
-    uint64_t r = c;
-    uint64_t c1 = r;
-    uint64_t *resb = a1 + (uint32_t)4U + i0;
-    uint64_t res_j = a1[(uint32_t)4U + i0];
-    c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););
-  memcpy(a_mod, a1 + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
-  uint64_t c00 = c0;
-  uint64_t tmp[4U] = { 0U };
-  uint64_t c1 = Hacl_Bignum256_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = a_mod;
-    uint64_t x = (m & tmp[i]) | (~m & a_mod[i]);
-    os[i] = x;);
-  uint64_t c[8U] = { 0U };
-  Hacl_Bignum256_mul(a_mod, r2, c);
-  reduction(n, mu, c, res);
+  memcpy(a1, a, 8U * sizeof (uint64_t));
+  areduction(n, mu, a1, a_mod);
+  to(n, mu, r2, a_mod, res);
 }
 
 /**
@@ -603,23 +501,22 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum256_mod(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[4U] = { 0U };
-  memset(one, 0U, (uint32_t)4U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
+  memset(one, 0U, 4U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m1 = acc;
   uint64_t is_valid_m = m0 & m1;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t r2[4U] = { 0U };
     precompr2(nBits, n, r2);
@@ -628,68 +525,68 @@ bool Hacl_Bignum256_mod(uint64_t *n, uint64_t *a, uint64_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b)
 {
   uint64_t one[4U] = { 0U };
-  memset(one, 0U, (uint32_t)4U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
+  memset(one, 0U, 4U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m10 = acc0;
   uint64_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t m1;
-  if (bBits < (uint32_t)64U * bLen)
+  if (bBits < 64U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
     uint64_t *b2 = (uint64_t *)alloca(bLen * sizeof (uint64_t));
     memset(b2, 0U, bLen * sizeof (uint64_t));
-    uint32_t i0 = bBits / (uint32_t)64U;
-    uint32_t j = bBits % (uint32_t)64U;
-    b2[i0] = b2[i0] | (uint64_t)1U << j;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 64U;
+    uint32_t j = bBits % 64U;
+    b2[i0] = b2[i0] | 1ULL << j;
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    m1 = 0xFFFFFFFFFFFFFFFFULL;
   }
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m2 = acc;
   uint64_t m = m1 & m2;
   return m00 & m;
@@ -706,26 +603,24 @@ exp_vartime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[4U] = { 0U };
-    uint64_t c[8U] = { 0U };
-    Hacl_Bignum256_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint64_t resM[4U] = { 0U };
     uint64_t ctx[8U] = { 0U };
-    memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
+    memcpy(ctx, n, 4U * sizeof (uint64_t));
+    memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -733,86 +628,76 @@ exp_vartime_precomp(
       uint64_t *ctx_n0 = ctx;
       amont_sqr(ctx_n0, mu, aM, aM);
     }
-    uint64_t tmp[8U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)4U * sizeof (uint64_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint64_t aM[4U] = { 0U };
-  uint64_t c[8U] = { 0U };
-  Hacl_Bignum256_mul(a, r2, c);
-  reduction(n, mu, c, aM);
+  to(n, mu, r2, a, aM);
   uint64_t resM[4U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[8U] = { 0U };
-  memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(ctx, n, 4U * sizeof (uint64_t));
+  memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
   uint64_t table[64U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)4U;
+  uint64_t *t1 = table + 4U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)4U;
+  uint64_t *ctx_r20 = ctx + 4U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(t1, aM, 4U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)4U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 4U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U;
+    memcpy(table + (2U * i + 2U) * 4U, tmp, 4U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 4U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 4U, tmp, 4U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
     uint32_t bits_l32 = (uint32_t)bits_c;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)4U;
-    memcpy(resM, (uint64_t *)a_bits_l, (uint32_t)4U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 4U;
+    memcpy(resM, (uint64_t *)a_bits_l, 4U * sizeof (uint64_t));
   }
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[4U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
     uint32_t bits_l32 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)4U;
-    memcpy(tmp0, (uint64_t *)a_bits_l, (uint32_t)4U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 4U;
+    memcpy(tmp0, (uint64_t *)a_bits_l, 4U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint64_t tmp1[8U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)4U * sizeof (uint64_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -826,32 +711,30 @@ exp_consttime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[4U] = { 0U };
-    uint64_t c[8U] = { 0U };
-    Hacl_Bignum256_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint64_t resM[4U] = { 0U };
     uint64_t ctx[8U] = { 0U };
-    memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    memcpy(ctx, n, 4U * sizeof (uint64_t));
+    memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]);
+        0U,
+        4U,
+        1U,
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;);
       uint64_t *ctx_n0 = ctx;
@@ -862,73 +745,65 @@ exp_consttime_precomp(
     }
     uint64_t sw0 = sw;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]);
+      0U,
+      4U,
+      1U,
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;);
-    uint64_t tmp[8U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)4U * sizeof (uint64_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint64_t aM[4U] = { 0U };
-  uint64_t c0[8U] = { 0U };
-  Hacl_Bignum256_mul(a, r2, c0);
-  reduction(n, mu, c0, aM);
+  to(n, mu, r2, a, aM);
   uint64_t resM[4U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[8U] = { 0U };
-  memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(ctx, n, 4U * sizeof (uint64_t));
+  memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
   uint64_t table[64U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)4U;
+  uint64_t *t1 = table + 4U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)4U;
+  uint64_t *ctx_r20 = ctx + 4U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(t1, aM, 4U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)4U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 4U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U;
+    memcpy(table + (2U * i + 2U) * 4U, tmp, 4U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 4U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 4U, tmp, 4U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint64_t *)table, (uint32_t)4U * sizeof (uint64_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+    memcpy(resM, (uint64_t *)table, 4U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)4U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 4U;
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *os = resM;
         uint64_t x = (c & res_j[i]) | (~c & resM[i]);
         os[i] = x;););
@@ -936,40 +811,38 @@ exp_consttime_precomp(
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[4U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)4U * sizeof (uint64_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 4U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)4U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 4U;
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint64_t tmp1[8U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)4U * sizeof (uint64_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -1034,17 +907,16 @@ Hacl_Bignum256_mod_exp_vartime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -1077,17 +949,16 @@ Hacl_Bignum256_mod_exp_consttime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -1108,67 +979,66 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum256_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[4U] = { 0U };
-  memset(one, 0U, (uint32_t)4U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
+  memset(one, 0U, 4U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m1 = acc0;
   uint64_t m00 = m0 & m1;
   uint64_t bn_zero[4U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
   uint64_t res10 = mask1;
   uint64_t m10 = res10;
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m2 = acc;
   uint64_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t n2[4U] = { 0U };
-    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2);
-    uint64_t *a1 = n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2);
+    uint64_t *a1 = n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
     KRML_MAYBE_FOR3(i,
-      (uint32_t)0U,
-      (uint32_t)3U,
-      (uint32_t)1U,
+      0U,
+      3U,
+      1U,
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
     uint64_t c1 = c;
     uint64_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)256U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 256U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -1192,17 +1062,15 @@ Heap-allocate and initialize a montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum256_mont_ctx_init(uint64_t *n)
 {
-  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint64_t));
-  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint64_t));
+  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC(4U, sizeof (uint64_t));
+  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC(4U, sizeof (uint64_t));
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)4U * sizeof (uint64_t));
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
+  memcpy(n11, n, 4U * sizeof (uint64_t));
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
   precompr2(nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
-  res = { .len = (uint32_t)4U, .n = n11, .mu = mu, .r2 = r21 };
+  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = 4U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *)KRML_HOST_MALLOC(sizeof (
@@ -1330,21 +1198,21 @@ Hacl_Bignum256_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k;
   uint64_t n2[4U] = { 0U };
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
-  uint64_t *a1 = k1.n + (uint32_t)1U;
-  uint64_t *res1 = n2 + (uint32_t)1U;
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
+  uint64_t *a1 = k1.n + 1U;
+  uint64_t *res1 = n2 + 1U;
   uint64_t c = c0;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
+    0U,
+    3U,
+    1U,
     uint64_t t1 = a1[i];
     uint64_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
   uint64_t c1 = c;
   uint64_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)256U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 256U, n2, res);
 }
 
 
@@ -1366,36 +1234,28 @@ Load a bid-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum256_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res2;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -1415,36 +1275,28 @@ Load a little-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum256_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++)
   {
     uint64_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)8U;
+    uint8_t *bj = tmp + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r1 = u;
     uint64_t x = r1;
@@ -1462,12 +1314,8 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum256_bn_to_bytes_be(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(res + i * (uint32_t)8U, b[(uint32_t)4U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(res + i * 8U, b[4U - i - 1U]););
 }
 
 /**
@@ -1479,12 +1327,8 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum256_bn_to_bytes_le(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(res + i * (uint32_t)8U, b[i]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(res + i * 8U, b[i]););
 }
 
 
@@ -1500,14 +1344,14 @@ Returns 2^64 - 1 if a < b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum256_lt_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   return acc;
 }
 
@@ -1518,11 +1362,11 @@ Returns 2^64 - 1 if a = b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum256_eq_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
diff --git a/src/msvc/Hacl_Bignum256_32.c b/src/msvc/Hacl_Bignum256_32.c
index 1c8ce59b..29a5a52e 100644
--- a/src/msvc/Hacl_Bignum256_32.c
+++ b/src/msvc/Hacl_Bignum256_32.c
@@ -60,26 +60,26 @@ Write `a + b mod 2^256` in `res`.
 */
 uint32_t Hacl_Bignum256_32_add(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i););
   return c;
 }
@@ -93,26 +93,26 @@ Write `a - b mod 2^256` in `res`.
 */
 uint32_t Hacl_Bignum256_32_sub(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i););
   return c;
 }
@@ -129,56 +129,56 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum256_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i););
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i););
   uint32_t c1 = c;
   uint32_t c2 = c00 - c1;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -196,57 +196,57 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum256_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i););
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i););
   uint32_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint32_t c2 = (uint32_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t c2 = 0U - c00;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x;);
@@ -260,32 +260,32 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum256_32_mul(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)16U * sizeof (uint32_t));
+  memset(res, 0U, 16U * sizeof (uint32_t));
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t bj = b[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
+    uint32_t c = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = a[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = a[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i0);
-      uint32_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = a[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c, res_i1);
-      uint32_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = a[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c, res_i2);
-      uint32_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = a[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c, res_i););
     uint32_t r = c;
-    res[(uint32_t)8U + i0] = r;);
+    res[8U + i0] = r;);
 }
 
 /**
@@ -296,31 +296,31 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)16U * sizeof (uint32_t));
+  memset(res, 0U, 16U * sizeof (uint32_t));
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *ab = a;
     uint32_t a_j = a[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint32_t a_i = ab[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = ab[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i0);
-      uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = ab[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c, res_i1);
-      uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = ab[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c, res_i2);
-      uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = ab[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint32_t a_i = ab[i];
       uint32_t *res_i = res_j + i;
@@ -328,29 +328,29 @@ void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res)
     }
     uint32_t r = c;
     res[i0 + i0] = r;);
-  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint32_t tmp[16U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t res1 = (uint64_t)a[i] * (uint64_t)a[i];
-    uint32_t hi = (uint32_t)(res1 >> (uint32_t)32U);
+    uint32_t hi = (uint32_t)(res1 >> 32U);
     uint32_t lo = (uint32_t)res1;
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
-  uint32_t i = nBits / (uint32_t)32U;
-  uint32_t j = nBits % (uint32_t)32U;
-  res[i] = res[i] | (uint32_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)512U - nBits; i0++)
+  memset(res, 0U, 8U * sizeof (uint32_t));
+  uint32_t i = nBits / 32U;
+  uint32_t j = nBits % 32U;
+  res[i] = res[i] | 1U << j;
+  for (uint32_t i0 = 0U; i0 < 512U - nBits; i0++)
   {
     Hacl_Bignum256_32_add_mod(n, res, res, res);
   }
@@ -358,118 +358,125 @@ static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 
 static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
+    uint32_t c1 = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i););
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)8U + i0;
-    uint32_t res_j = c[(uint32_t)8U + i0];
+    uint32_t *resb = c + 8U + i0;
+    uint32_t res_j = c[8U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(res, c + 8U, 8U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
-  uint32_t c1 = (uint32_t)0U;
+  uint32_t c1 = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i););
   uint32_t c10 = c1;
   uint32_t c2 = c00 - c10;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
 }
 
+static inline void to(uint32_t *n, uint32_t nInv, uint32_t *r2, uint32_t *a, uint32_t *aM)
+{
+  uint32_t c[16U] = { 0U };
+  Hacl_Bignum256_32_mul(a, r2, c);
+  reduction(n, nInv, c, aM);
+}
+
 static inline void from(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *a)
 {
   uint32_t tmp[16U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp, aM, 8U * sizeof (uint32_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
+    uint32_t c1 = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i););
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)8U + i0;
-    uint32_t res_j = c[(uint32_t)8U + i0];
+    uint32_t *resb = c + 8U + i0;
+    uint32_t res_j = c[8U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(res, c + 8U, 8U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
   uint32_t c1 = Hacl_Bignum256_32_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (m & tmp[i]) | (~m & res[i]);
     os[i] = x;);
@@ -479,84 +486,14 @@ static inline void
 amont_mul(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *bM, uint32_t *resM)
 {
   uint32_t c[16U] = { 0U };
-  memset(c, 0U, (uint32_t)16U * sizeof (uint32_t));
-  KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t bj = bM[i0];
-    uint32_t *res_j = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = aM[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c1, res_i0);
-      uint32_t a_i0 = aM[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c1, res_i1);
-      uint32_t a_i1 = aM[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c1, res_i2);
-      uint32_t a_i2 = aM[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c1, res_i););
-    uint32_t r = c1;
-    c[(uint32_t)8U + i0] = r;);
+  Hacl_Bignum256_32_mul(aM, bM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
 static inline void amont_sqr(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *resM)
 {
   uint32_t c[16U] = { 0U };
-  memset(c, 0U, (uint32_t)16U * sizeof (uint32_t));
-  KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *ab = aM;
-    uint32_t a_j = aM[i0];
-    uint32_t *res_j = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
-    {
-      uint32_t a_i = ab[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c1, res_i0);
-      uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c1, res_i1);
-      uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c1, res_i2);
-      uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c1, res_i);
-    }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
-    {
-      uint32_t a_i = ab[i];
-      uint32_t *res_i = res_j + i;
-      c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c1, res_i);
-    }
-    uint32_t r = c1;
-    c[i0 + i0] = r;);
-  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, c, c, c);
-  KRML_HOST_IGNORE(c0);
-  uint32_t tmp[16U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint64_t res = (uint64_t)aM[i] * (uint64_t)aM[i];
-    uint32_t hi = (uint32_t)(res >> (uint32_t)32U);
-    uint32_t lo = (uint32_t)res;
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, c, tmp, c);
-  KRML_HOST_IGNORE(c1);
+  Hacl_Bignum256_32_sqr(aM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -565,52 +502,9 @@ bn_slow_precomp(uint32_t *n, uint32_t mu, uint32_t *r2, uint32_t *a, uint32_t *r
 {
   uint32_t a_mod[8U] = { 0U };
   uint32_t a1[16U] = { 0U };
-  memcpy(a1, a, (uint32_t)16U * sizeof (uint32_t));
-  uint32_t c0 = (uint32_t)0U;
-  KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t qj = mu * a1[i0];
-    uint32_t *res_j0 = a1 + i0;
-    uint32_t c = (uint32_t)0U;
-    KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i););
-    uint32_t r = c;
-    uint32_t c1 = r;
-    uint32_t *resb = a1 + (uint32_t)8U + i0;
-    uint32_t res_j = a1[(uint32_t)8U + i0];
-    c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c1, res_j, resb););
-  memcpy(a_mod, a1 + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t));
-  uint32_t c00 = c0;
-  uint32_t tmp[8U] = { 0U };
-  uint32_t c1 = Hacl_Bignum256_32_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = a_mod;
-    uint32_t x = (m & tmp[i]) | (~m & a_mod[i]);
-    os[i] = x;);
-  uint32_t c[16U] = { 0U };
-  Hacl_Bignum256_32_mul(a_mod, r2, c);
-  reduction(n, mu, c, res);
+  memcpy(a1, a, 16U * sizeof (uint32_t));
+  areduction(n, mu, a1, a_mod);
+  to(n, mu, r2, a_mod, res);
 }
 
 /**
@@ -627,22 +521,22 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum256_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[8U] = { 0U };
-  memset(one, 0U, (uint32_t)8U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
+  memset(one, 0U, 8U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m1 = acc;
   uint32_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t r2[8U] = { 0U };
     precompr2(nBits, n, r2);
@@ -651,68 +545,68 @@ bool Hacl_Bignum256_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b)
 {
   uint32_t one[8U] = { 0U };
-  memset(one, 0U, (uint32_t)8U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
+  memset(one, 0U, 8U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m10 = acc0;
   uint32_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t m1;
-  if (bBits < (uint32_t)32U * bLen)
+  if (bBits < 32U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), bLen);
     uint32_t *b2 = (uint32_t *)alloca(bLen * sizeof (uint32_t));
     memset(b2, 0U, bLen * sizeof (uint32_t));
-    uint32_t i0 = bBits / (uint32_t)32U;
-    uint32_t j = bBits % (uint32_t)32U;
-    b2[i0] = b2[i0] | (uint32_t)1U << j;
-    uint32_t acc = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 32U;
+    uint32_t j = bBits % 32U;
+    b2[i0] = b2[i0] | 1U << j;
+    uint32_t acc = 0U;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]);
       uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
     }
     uint32_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint32_t)0xFFFFFFFFU;
+    m1 = 0xFFFFFFFFU;
   }
-  uint32_t acc = (uint32_t)0U;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m2 = acc;
   uint32_t m = m1 & m2;
   return m00 & m;
@@ -729,26 +623,24 @@ exp_vartime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[8U] = { 0U };
-    uint32_t c[16U] = { 0U };
-    Hacl_Bignum256_32_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint32_t resM[8U] = { 0U };
     uint32_t ctx[16U] = { 0U };
-    memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(ctx, n, 8U * sizeof (uint32_t));
+    memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -756,86 +648,76 @@ exp_vartime_precomp(
       uint32_t *ctx_n0 = ctx;
       amont_sqr(ctx_n0, mu, aM, aM);
     }
-    uint32_t tmp[16U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)8U * sizeof (uint32_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint32_t aM[8U] = { 0U };
-  uint32_t c[16U] = { 0U };
-  Hacl_Bignum256_32_mul(a, r2, c);
-  reduction(n, mu, c, aM);
+  to(n, mu, r2, a, aM);
   uint32_t resM[8U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[16U] = { 0U };
-  memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(ctx, n, 8U * sizeof (uint32_t));
+  memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
   uint32_t table[128U] = { 0U };
   uint32_t tmp[8U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)8U;
+  uint32_t *t1 = table + 8U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)8U;
+  uint32_t *ctx_r20 = ctx + 8U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(t1, aM, 8U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)8U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 8U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U;
+    memcpy(table + (2U * i + 2U) * 8U, tmp, 8U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 8U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 8U, tmp, 8U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
     uint32_t bits_l32 = bits_c;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)8U;
-    memcpy(resM, (uint32_t *)a_bits_l, (uint32_t)8U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 8U;
+    memcpy(resM, (uint32_t *)a_bits_l, 8U * sizeof (uint32_t));
   }
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[8U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
     uint32_t bits_l32 = bits_l;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)8U;
-    memcpy(tmp0, (uint32_t *)a_bits_l, (uint32_t)8U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 8U;
+    memcpy(tmp0, (uint32_t *)a_bits_l, 8U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint32_t tmp1[16U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)8U * sizeof (uint32_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -849,32 +731,30 @@ exp_consttime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[8U] = { 0U };
-    uint32_t c[16U] = { 0U };
-    Hacl_Bignum256_32_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint32_t resM[8U] = { 0U };
     uint32_t ctx[16U] = { 0U };
-    memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    memcpy(ctx, n, 8U * sizeof (uint32_t));
+    memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
       KRML_MAYBE_FOR8(i,
-        (uint32_t)0U,
-        (uint32_t)8U,
-        (uint32_t)1U,
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]);
+        0U,
+        8U,
+        1U,
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;);
       uint32_t *ctx_n0 = ctx;
@@ -885,73 +765,65 @@ exp_consttime_precomp(
     }
     uint32_t sw0 = sw;
     KRML_MAYBE_FOR8(i,
-      (uint32_t)0U,
-      (uint32_t)8U,
-      (uint32_t)1U,
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]);
+      0U,
+      8U,
+      1U,
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;);
-    uint32_t tmp[16U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)8U * sizeof (uint32_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint32_t aM[8U] = { 0U };
-  uint32_t c0[16U] = { 0U };
-  Hacl_Bignum256_32_mul(a, r2, c0);
-  reduction(n, mu, c0, aM);
+  to(n, mu, r2, a, aM);
   uint32_t resM[8U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[16U] = { 0U };
-  memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(ctx, n, 8U * sizeof (uint32_t));
+  memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
   uint32_t table[128U] = { 0U };
   uint32_t tmp[8U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)8U;
+  uint32_t *t1 = table + 8U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)8U;
+  uint32_t *ctx_r20 = ctx + 8U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(t1, aM, 8U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)8U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 8U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U;
+    memcpy(table + (2U * i + 2U) * 8U, tmp, 8U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 8U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 8U, tmp, 8U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint32_t *)table, (uint32_t)8U * sizeof (uint32_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+    memcpy(resM, (uint32_t *)table, 8U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)8U;
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 8U;
       KRML_MAYBE_FOR8(i,
-        (uint32_t)0U,
-        (uint32_t)8U,
-        (uint32_t)1U,
+        0U,
+        8U,
+        1U,
         uint32_t *os = resM;
         uint32_t x = (c & res_j[i]) | (~c & resM[i]);
         os[i] = x;););
@@ -959,40 +831,38 @@ exp_consttime_precomp(
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[8U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint32_t *)table, (uint32_t)8U * sizeof (uint32_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
+    memcpy(tmp0, (uint32_t *)table, 8U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)8U;
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 8U;
       KRML_MAYBE_FOR8(i,
-        (uint32_t)0U,
-        (uint32_t)8U,
-        (uint32_t)1U,
+        0U,
+        8U,
+        1U,
         uint32_t *os = tmp0;
         uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint32_t tmp1[16U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)8U * sizeof (uint32_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -1057,16 +927,16 @@ Hacl_Bignum256_32_mod_exp_vartime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -1099,16 +969,16 @@ Hacl_Bignum256_32_mod_exp_consttime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -1129,80 +999,80 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum256_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[8U] = { 0U };
-  memset(one, 0U, (uint32_t)8U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
+  memset(one, 0U, 8U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m1 = acc0;
   uint32_t m00 = m0 & m1;
   uint32_t bn_zero[8U] = { 0U };
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
+  uint32_t mask = 0xFFFFFFFFU;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint32_t mask1 = mask;
   uint32_t res10 = mask1;
   uint32_t m10 = res10;
-  uint32_t acc = (uint32_t)0U;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m2 = acc;
   uint32_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t n2[8U] = { 0U };
-    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2);
-    uint32_t *a1 = n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2);
+    uint32_t *a1 = n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
     {
-      uint32_t t1 = a1[(uint32_t)4U * (uint32_t)0U];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * (uint32_t)0U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * 0U];
+      uint32_t *res_i0 = res1 + 4U * 0U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * 0U + 1U];
+      uint32_t *res_i1 = res1 + 4U * 0U + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * 0U + 2U];
+      uint32_t *res_i2 = res1 + 4U * 0U + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * 0U + 3U];
+      uint32_t *res_i = res1 + 4U * 0U + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
     KRML_MAYBE_FOR3(i,
-      (uint32_t)4U,
-      (uint32_t)7U,
-      (uint32_t)1U,
+      4U,
+      7U,
+      1U,
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
     uint32_t c1 = c;
     uint32_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)256U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 256U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 
@@ -1226,16 +1096,15 @@ Heap-allocate and initialize a montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum256_32_mont_ctx_init(uint32_t *n)
 {
-  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
-  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
+  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)8U * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
+  memcpy(n11, n, 8U * sizeof (uint32_t));
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
   precompr2(nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
-  res = { .len = (uint32_t)8U, .n = n11, .mu = mu, .r2 = r21 };
+  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res = { .len = 8U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *)KRML_HOST_MALLOC(sizeof (
@@ -1363,35 +1232,35 @@ Hacl_Bignum256_32_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k;
   uint32_t n2[8U] = { 0U };
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
-  uint32_t *a1 = k1.n + (uint32_t)1U;
-  uint32_t *res1 = n2 + (uint32_t)1U;
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
+  uint32_t *a1 = k1.n + 1U;
+  uint32_t *res1 = n2 + 1U;
   uint32_t c = c0;
   {
-    uint32_t t1 = a1[(uint32_t)4U * (uint32_t)0U];
-    uint32_t *res_i0 = res1 + (uint32_t)4U * (uint32_t)0U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-    uint32_t t10 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint32_t *res_i1 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-    uint32_t t11 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint32_t *res_i2 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-    uint32_t t12 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint32_t *res_i = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+    uint32_t t1 = a1[4U * 0U];
+    uint32_t *res_i0 = res1 + 4U * 0U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+    uint32_t t10 = a1[4U * 0U + 1U];
+    uint32_t *res_i1 = res1 + 4U * 0U + 1U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+    uint32_t t11 = a1[4U * 0U + 2U];
+    uint32_t *res_i2 = res1 + 4U * 0U + 2U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+    uint32_t t12 = a1[4U * 0U + 3U];
+    uint32_t *res_i = res1 + 4U * 0U + 3U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
   }
   KRML_MAYBE_FOR3(i,
-    (uint32_t)4U,
-    (uint32_t)7U,
-    (uint32_t)1U,
+    4U,
+    7U,
+    1U,
     uint32_t t1 = a1[i];
     uint32_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
   uint32_t c1 = c;
   uint32_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)256U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 256U, n2, res);
 }
 
 
@@ -1413,36 +1282,28 @@ Load a bid-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint32_t *os = res2;
-    uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U);
+    uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U);
     uint32_t x = u;
     os[i] = x;
   }
@@ -1462,36 +1323,28 @@ Load a little-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++)
   {
     uint32_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)4U;
+    uint8_t *bj = tmp + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r1 = u;
     uint32_t x = r1;
@@ -1509,12 +1362,8 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum256_32_bn_to_bytes_be(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_be(res + i * (uint32_t)4U, b[(uint32_t)8U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(res + i * 4U, b[8U - i - 1U]););
 }
 
 /**
@@ -1526,12 +1375,8 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum256_32_bn_to_bytes_le(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_le(res + i * (uint32_t)4U, b[i]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_le(res + i * 4U, b[i]););
 }
 
 
@@ -1547,14 +1392,14 @@ Returns 2^32 - 1 if a < b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum256_32_lt_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t acc = (uint32_t)0U;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   return acc;
 }
 
@@ -1565,11 +1410,11 @@ Returns 2^32 - 1 if a = b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum256_32_eq_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
+  uint32_t mask = 0xFFFFFFFFU;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;);
   uint32_t mask1 = mask;
diff --git a/src/msvc/Hacl_Bignum32.c b/src/msvc/Hacl_Bignum32.c
index f719a08e..55c3f90c 100644
--- a/src/msvc/Hacl_Bignum32.c
+++ b/src/msvc/Hacl_Bignum32.c
@@ -105,9 +105,9 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum32_mul(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, b, tmp, res);
 }
 
@@ -119,9 +119,9 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum32_sqr(uint32_t len, uint32_t *a, uint32_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, a, tmp, res);
 }
 
@@ -142,61 +142,8 @@ bn_slow_precomp(
   uint32_t *a1 = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(a1, 0U, (len + len) * sizeof (uint32_t));
   memcpy(a1, a, (len + len) * sizeof (uint32_t));
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
-  {
-    uint32_t qj = mu * a1[i0];
-    uint32_t *res_j0 = a1 + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
-    {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i);
-    }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
-    {
-      uint32_t a_i = n[i];
-      uint32_t *res_i = res_j0 + i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i);
-    }
-    uint32_t r = c;
-    uint32_t c1 = r;
-    uint32_t *resb = a1 + len + i0;
-    uint32_t res_j = a1[len + i0];
-    c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c1, res_j, resb);
-  }
-  memcpy(a_mod, a1 + len, (len + len - len) * sizeof (uint32_t));
-  uint32_t c00 = c0;
-  KRML_CHECK_SIZE(sizeof (uint32_t), len);
-  uint32_t *tmp0 = (uint32_t *)alloca(len * sizeof (uint32_t));
-  memset(tmp0, 0U, len * sizeof (uint32_t));
-  uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len, a_mod, n, tmp0);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
-  {
-    uint32_t *os = a_mod;
-    uint32_t x = (m & tmp0[i]) | (~m & a_mod[i]);
-    os[i] = x;
-  }
-  KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
-  uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
-  memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a_mod, r2, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, res);
+  Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32(len, n, mu, a1, a_mod);
+  Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a_mod, res);
 }
 
 /**
@@ -216,20 +163,20 @@ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res)
   uint32_t *one = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc;
   uint32_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t *r2 = (uint32_t *)alloca(len * sizeof (uint32_t));
@@ -242,7 +189,7 @@ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res)
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -276,8 +223,8 @@ Hacl_Bignum32_mod_exp_vartime(
 )
 {
   uint32_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len, nBits, n, a, bBits, b, res);
   }
@@ -285,7 +232,7 @@ Hacl_Bignum32_mod_exp_vartime(
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -319,8 +266,8 @@ Hacl_Bignum32_mod_exp_consttime(
 )
 {
   uint32_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u32(len, nBits, n, a, bBits, b, res);
   }
@@ -328,7 +275,7 @@ Hacl_Bignum32_mod_exp_consttime(
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -353,23 +300,23 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a,
   uint32_t *one = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc0;
   uint32_t m00 = m0 & m1;
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *bn_zero = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(bn_zero, 0U, len * sizeof (uint32_t));
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -377,53 +324,48 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a,
   uint32_t mask1 = mask;
   uint32_t res10 = mask1;
   uint32_t m10 = res10;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t *n2 = (uint32_t *)alloca(len * sizeof (uint32_t));
     memset(n2, 0U, len * sizeof (uint32_t));
-    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2);
+    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2);
     uint32_t c1;
-    if ((uint32_t)1U < len)
+    if (1U < len)
     {
-      uint32_t *a1 = n + (uint32_t)1U;
-      uint32_t *res1 = n2 + (uint32_t)1U;
+      uint32_t *a1 = n + 1U;
+      uint32_t *res1 = n2 + 1U;
       uint32_t c = c0;
-      for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U; i++)
+      for (uint32_t i = 0U; i < (len - 1U) / 4U; i++)
       {
-        uint32_t t1 = a1[(uint32_t)4U * i];
-        uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-        uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-        uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-        uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-        uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-        uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-        uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+        uint32_t t1 = a1[4U * i];
+        uint32_t *res_i0 = res1 + 4U * i;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+        uint32_t t10 = a1[4U * i + 1U];
+        uint32_t *res_i1 = res1 + 4U * i + 1U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+        uint32_t t11 = a1[4U * i + 2U];
+        uint32_t *res_i2 = res1 + 4U * i + 2U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+        uint32_t t12 = a1[4U * i + 3U];
+        uint32_t *res_i = res1 + 4U * i + 3U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
       }
-      for
-      (uint32_t
-        i = (len - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-        i
-        < len - (uint32_t)1U;
-        i++)
+      for (uint32_t i = (len - 1U) / 4U * 4U; i < len - 1U; i++)
       {
         uint32_t t1 = a1[i];
         uint32_t *res_i = res1 + i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i);
       }
       uint32_t c10 = c;
       c1 = c10;
@@ -432,20 +374,14 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a,
     {
       c1 = c0;
     }
-    KRML_HOST_IGNORE(c1);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len,
-      nBits,
-      n,
-      a,
-      (uint32_t)32U * len,
-      n2,
-      res);
+    KRML_MAYBE_UNUSED_VAR(c1);
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len, nBits, n, a, 32U * len, n2, res);
   }
   else
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 
@@ -477,7 +413,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -632,38 +568,33 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t *n2 = (uint32_t *)alloca(len1 * sizeof (uint32_t));
   memset(n2, 0U, len1 * sizeof (uint32_t));
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
   uint32_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint32_t *a1 = k1.n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t *a1 = k1.n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i);
     }
     uint32_t c10 = c;
     c1 = c10;
@@ -672,13 +603,13 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(len1,
     k1.n,
     k1.mu,
     k1.r2,
     a,
-    (uint32_t)32U * len1,
+    32U * len1,
     n2,
     res);
 }
@@ -702,36 +633,28 @@ Load a big-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint32_t *os = res2;
-    uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U);
+    uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U);
     uint32_t x = u;
     os[i] = x;
   }
@@ -751,36 +674,28 @@ Load a little-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++)
   {
     uint32_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)4U;
+    uint8_t *bj = tmp + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r1 = u;
     uint32_t x = r1;
@@ -797,14 +712,14 @@ Serialize a bignum into big-endian memory.
 */
 void Hacl_Bignum32_bn_to_bytes_be(uint32_t len, uint32_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store32_be(tmp + i * (uint32_t)4U, b[bnLen - i - (uint32_t)1U]);
+    store32_be(tmp + i * 4U, b[bnLen - i - 1U]);
   }
   memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t));
 }
@@ -817,14 +732,14 @@ Serialize a bignum into little-endian memory.
 */
 void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store32_le(tmp + i * (uint32_t)4U, b[i]);
+    store32_le(tmp + i * 4U, b[i]);
   }
   memcpy(res, tmp, len * sizeof (uint8_t));
 }
@@ -842,12 +757,12 @@ Returns 2^32 - 1 if a < b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b)
 {
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   return acc;
 }
@@ -859,8 +774,8 @@ Returns 2^32 - 1 if a = b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum32_eq_mask(uint32_t len, uint32_t *a, uint32_t *b)
 {
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/msvc/Hacl_Bignum4096.c b/src/msvc/Hacl_Bignum4096.c
index ee51cc5e..920ae2fb 100644
--- a/src/msvc/Hacl_Bignum4096.c
+++ b/src/msvc/Hacl_Bignum4096.c
@@ -63,26 +63,26 @@ Write `a + b mod 2^4096` in `res`.
 */
 uint64_t Hacl_Bignum4096_add(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i););
   return c;
 }
@@ -96,26 +96,26 @@ Write `a - b mod 2^4096` in `res`.
 */
 uint64_t Hacl_Bignum4096_sub(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i););
   return c;
 }
@@ -132,53 +132,53 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum4096_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i););
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i););
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -198,54 +198,54 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum4096_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i););
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i););
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -262,7 +262,7 @@ Write `a * b` in `res`.
 void Hacl_Bignum4096_mul(uint64_t *a, uint64_t *b, uint64_t *res)
 {
   uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64((uint32_t)64U, a, b, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(64U, a, b, tmp, res);
 }
 
 /**
@@ -274,16 +274,16 @@ Write `a * a` in `res`.
 void Hacl_Bignum4096_sqr(uint64_t *a, uint64_t *res)
 {
   uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64((uint32_t)64U, a, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(64U, a, tmp, res);
 }
 
 static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
-  uint32_t i = nBits / (uint32_t)64U;
-  uint32_t j = nBits % (uint32_t)64U;
-  res[i] = res[i] | (uint64_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)8192U - nBits; i0++)
+  memset(res, 0U, 64U * sizeof (uint64_t));
+  uint32_t i = nBits / 64U;
+  uint32_t j = nBits % 64U;
+  res[i] = res[i] | 1ULL << j;
+  for (uint32_t i0 = 0U; i0 < 8192U - nBits; i0++)
   {
     Hacl_Bignum4096_add_mod(n, res, res, res);
   }
@@ -291,61 +291,61 @@ static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 
 static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      16U,
+      1U,
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i););
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)64U + i0;
-    uint64_t res_j = c[(uint32_t)64U + i0];
+    uint64_t *resb = c + 64U + i0;
+    uint64_t res_j = c[64U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(res, c + 64U, 64U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
-  uint64_t c1 = (uint64_t)0U;
+  uint64_t c1 = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i););
   uint64_t c10 = c1;
   uint64_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -353,50 +353,57 @@ static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *
   }
 }
 
+static inline void to(uint64_t *n, uint64_t nInv, uint64_t *r2, uint64_t *a, uint64_t *aM)
+{
+  uint64_t c[128U] = { 0U };
+  Hacl_Bignum4096_mul(a, r2, c);
+  reduction(n, nInv, c, aM);
+}
+
 static inline void from(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *a)
 {
   uint64_t tmp[128U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(tmp, aM, 64U * sizeof (uint64_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      16U,
+      1U,
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i););
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)64U + i0;
-    uint64_t res_j = c[(uint32_t)64U + i0];
+    uint64_t *resb = c + 64U + i0;
+    uint64_t res_j = c[64U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(res, c + 64U, 64U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
   uint64_t c1 = Hacl_Bignum4096_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (m & tmp[i]) | (~m & res[i]);
@@ -408,16 +415,14 @@ static inline void
 amont_mul(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *bM, uint64_t *resM)
 {
   uint64_t c[128U] = { 0U };
-  uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64((uint32_t)64U, aM, bM, tmp, c);
+  Hacl_Bignum4096_mul(aM, bM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
 static inline void amont_sqr(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *resM)
 {
   uint64_t c[128U] = { 0U };
-  uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64((uint32_t)64U, aM, tmp, c);
+  Hacl_Bignum4096_sqr(aM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -426,50 +431,9 @@ bn_slow_precomp(uint64_t *n, uint64_t mu, uint64_t *r2, uint64_t *a, uint64_t *r
 {
   uint64_t a_mod[64U] = { 0U };
   uint64_t a1[128U] = { 0U };
-  memcpy(a1, a, (uint32_t)128U * sizeof (uint64_t));
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
-  {
-    uint64_t qj = mu * a1[i0];
-    uint64_t *res_j0 = a1 + i0;
-    uint64_t c = (uint64_t)0U;
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i););
-    uint64_t r = c;
-    uint64_t c1 = r;
-    uint64_t *resb = a1 + (uint32_t)64U + i0;
-    uint64_t res_j = a1[(uint32_t)64U + i0];
-    c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb);
-  }
-  memcpy(a_mod, a1 + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t));
-  uint64_t c00 = c0;
-  uint64_t tmp[64U] = { 0U };
-  uint64_t c1 = Hacl_Bignum4096_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
-  {
-    uint64_t *os = a_mod;
-    uint64_t x = (m & tmp[i]) | (~m & a_mod[i]);
-    os[i] = x;
-  }
-  uint64_t c[128U] = { 0U };
-  Hacl_Bignum4096_mul(a_mod, r2, c);
-  reduction(n, mu, c, res);
+  memcpy(a1, a, 128U * sizeof (uint64_t));
+  areduction(n, mu, a1, a_mod);
+  to(n, mu, r2, a_mod, res);
 }
 
 /**
@@ -486,22 +450,21 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum4096_mod(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[64U] = { 0U };
-  memset(one, 0U, (uint32_t)64U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memset(one, 0U, 64U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   uint64_t is_valid_m = m0 & m1;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t r2[64U] = { 0U };
     precompr2(nBits, n, r2);
@@ -510,65 +473,65 @@ bool Hacl_Bignum4096_mod(uint64_t *n, uint64_t *a, uint64_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b)
 {
   uint64_t one[64U] = { 0U };
-  memset(one, 0U, (uint32_t)64U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memset(one, 0U, 64U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m10 = acc0;
   uint64_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t m1;
-  if (bBits < (uint32_t)64U * bLen)
+  if (bBits < 64U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
     uint64_t *b2 = (uint64_t *)alloca(bLen * sizeof (uint64_t));
     memset(b2, 0U, bLen * sizeof (uint64_t));
-    uint32_t i0 = bBits / (uint32_t)64U;
-    uint32_t j = bBits % (uint32_t)64U;
-    b2[i0] = b2[i0] | (uint64_t)1U << j;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 64U;
+    uint32_t j = bBits % 64U;
+    b2[i0] = b2[i0] | 1ULL << j;
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    m1 = 0xFFFFFFFFFFFFFFFFULL;
   }
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t m = m1 & m2;
@@ -586,26 +549,24 @@ exp_vartime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[64U] = { 0U };
-    uint64_t c[128U] = { 0U };
-    Hacl_Bignum4096_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint64_t resM[64U] = { 0U };
     uint64_t ctx[128U] = { 0U };
-    memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
+    memcpy(ctx, n, 64U * sizeof (uint64_t));
+    memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -613,86 +574,76 @@ exp_vartime_precomp(
       uint64_t *ctx_n0 = ctx;
       amont_sqr(ctx_n0, mu, aM, aM);
     }
-    uint64_t tmp[128U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)64U * sizeof (uint64_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint64_t aM[64U] = { 0U };
-  uint64_t c[128U] = { 0U };
-  Hacl_Bignum4096_mul(a, r2, c);
-  reduction(n, mu, c, aM);
+  to(n, mu, r2, a, aM);
   uint64_t resM[64U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[128U] = { 0U };
-  memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(ctx, n, 64U * sizeof (uint64_t));
+  memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
   uint64_t table[1024U] = { 0U };
   uint64_t tmp[64U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)64U;
+  uint64_t *t1 = table + 64U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)64U;
+  uint64_t *ctx_r20 = ctx + 64U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(t1, aM, 64U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)64U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 64U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U;
+    memcpy(table + (2U * i + 2U) * 64U, tmp, 64U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 64U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 64U, tmp, 64U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
     uint32_t bits_l32 = (uint32_t)bits_c;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)64U;
-    memcpy(resM, (uint64_t *)a_bits_l, (uint32_t)64U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 64U;
+    memcpy(resM, (uint64_t *)a_bits_l, 64U * sizeof (uint64_t));
   }
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[64U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
     uint32_t bits_l32 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)64U;
-    memcpy(tmp0, (uint64_t *)a_bits_l, (uint32_t)64U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 64U;
+    memcpy(tmp0, (uint64_t *)a_bits_l, 64U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint64_t tmp1[128U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)64U * sizeof (uint64_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -706,30 +657,28 @@ exp_consttime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[64U] = { 0U };
-    uint64_t c[128U] = { 0U };
-    Hacl_Bignum4096_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint64_t resM[64U] = { 0U };
     uint64_t ctx[128U] = { 0U };
-    memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    memcpy(ctx, n, 64U * sizeof (uint64_t));
+    memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+      for (uint32_t i = 0U; i < 64U; i++)
       {
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -740,70 +689,62 @@ exp_consttime_precomp(
       sw = bit;
     }
     uint64_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+    for (uint32_t i = 0U; i < 64U; i++)
     {
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
-    uint64_t tmp[128U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)64U * sizeof (uint64_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint64_t aM[64U] = { 0U };
-  uint64_t c0[128U] = { 0U };
-  Hacl_Bignum4096_mul(a, r2, c0);
-  reduction(n, mu, c0, aM);
+  to(n, mu, r2, a, aM);
   uint64_t resM[64U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[128U] = { 0U };
-  memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(ctx, n, 64U * sizeof (uint64_t));
+  memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
   uint64_t table[1024U] = { 0U };
   uint64_t tmp[64U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)64U;
+  uint64_t *t1 = table + 64U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)64U;
+  uint64_t *ctx_r20 = ctx + 64U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(t1, aM, 64U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)64U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 64U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U;
+    memcpy(table + (2U * i + 2U) * 64U, tmp, 64U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 64U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 64U, tmp, 64U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint64_t *)table, (uint32_t)64U * sizeof (uint64_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+    memcpy(resM, (uint64_t *)table, 64U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)64U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 64U;
+      for (uint32_t i = 0U; i < 64U; i++)
       {
         uint64_t *os = resM;
         uint64_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -813,28 +754,28 @@ exp_consttime_precomp(
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[64U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)64U * sizeof (uint64_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 64U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)64U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 64U;
+      for (uint32_t i = 0U; i < 64U; i++)
       {
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -843,9 +784,7 @@ exp_consttime_precomp(
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint64_t tmp1[128U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)64U * sizeof (uint64_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -910,17 +849,16 @@ Hacl_Bignum4096_mod_exp_vartime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -953,17 +891,16 @@ Hacl_Bignum4096_mod_exp_consttime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -984,22 +921,22 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[64U] = { 0U };
-  memset(one, 0U, (uint32_t)64U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memset(one, 0U, 64U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc0;
   uint64_t m00 = m0 & m1;
   uint64_t bn_zero[64U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -1007,57 +944,56 @@ bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *r
   uint64_t mask1 = mask;
   uint64_t res10 = mask1;
   uint64_t m10 = res10;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t n2[64U] = { 0U };
-    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2);
-    uint64_t *a1 = n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2);
+    uint64_t *a1 = n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
     KRML_MAYBE_FOR15(i,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i););
+      0U,
+      15U,
+      1U,
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i););
     KRML_MAYBE_FOR3(i,
-      (uint32_t)60U,
-      (uint32_t)63U,
-      (uint32_t)1U,
+      60U,
+      63U,
+      1U,
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
     uint64_t c1 = c;
     uint64_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)4096U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 4096U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -1081,17 +1017,15 @@ Heap-allocate and initialize a montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum4096_mont_ctx_init(uint64_t *n)
 {
-  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint64_t));
-  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint64_t));
+  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC(64U, sizeof (uint64_t));
+  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC(64U, sizeof (uint64_t));
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)64U * sizeof (uint64_t));
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
+  memcpy(n11, n, 64U * sizeof (uint64_t));
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
   precompr2(nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
-  res = { .len = (uint32_t)64U, .n = n11, .mu = mu, .r2 = r21 };
+  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = 64U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *)KRML_HOST_MALLOC(sizeof (
@@ -1219,37 +1153,37 @@ Hacl_Bignum4096_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k;
   uint64_t n2[64U] = { 0U };
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
-  uint64_t *a1 = k1.n + (uint32_t)1U;
-  uint64_t *res1 = n2 + (uint32_t)1U;
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
+  uint64_t *a1 = k1.n + 1U;
+  uint64_t *res1 = n2 + 1U;
   uint64_t c = c0;
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t t1 = a1[(uint32_t)4U * i];
-    uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-    uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-    uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-    uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i););
+    0U,
+    15U,
+    1U,
+    uint64_t t1 = a1[4U * i];
+    uint64_t *res_i0 = res1 + 4U * i;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+    uint64_t t10 = a1[4U * i + 1U];
+    uint64_t *res_i1 = res1 + 4U * i + 1U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+    uint64_t t11 = a1[4U * i + 2U];
+    uint64_t *res_i2 = res1 + 4U * i + 2U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+    uint64_t t12 = a1[4U * i + 3U];
+    uint64_t *res_i = res1 + 4U * i + 3U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i););
   KRML_MAYBE_FOR3(i,
-    (uint32_t)60U,
-    (uint32_t)63U,
-    (uint32_t)1U,
+    60U,
+    63U,
+    1U,
     uint64_t t1 = a1[i];
     uint64_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
   uint64_t c1 = c;
   uint64_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)4096U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 4096U, n2, res);
 }
 
 
@@ -1271,36 +1205,28 @@ Load a bid-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum4096_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res2;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -1320,36 +1246,28 @@ Load a little-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum4096_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++)
   {
     uint64_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)8U;
+    uint8_t *bj = tmp + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r1 = u;
     uint64_t x = r1;
@@ -1367,10 +1285,10 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum4096_bn_to_bytes_be(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 64U; i++)
   {
-    store64_be(res + i * (uint32_t)8U, b[(uint32_t)64U - i - (uint32_t)1U]);
+    store64_be(res + i * 8U, b[64U - i - 1U]);
   }
 }
 
@@ -1383,10 +1301,10 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum4096_bn_to_bytes_le(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 64U; i++)
   {
-    store64_le(res + i * (uint32_t)8U, b[i]);
+    store64_le(res + i * 8U, b[i]);
   }
 }
 
@@ -1403,12 +1321,12 @@ Returns 2^64 - 1 if a < b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum4096_lt_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   return acc;
 }
@@ -1420,8 +1338,8 @@ Returns 2^64 - 1 if a = b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum4096_eq_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/msvc/Hacl_Bignum4096_32.c b/src/msvc/Hacl_Bignum4096_32.c
index 790d0428..f3330918 100644
--- a/src/msvc/Hacl_Bignum4096_32.c
+++ b/src/msvc/Hacl_Bignum4096_32.c
@@ -64,24 +64,24 @@ Write `a + b mod 2^4096` in `res`.
 */
 uint32_t Hacl_Bignum4096_32_add(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
   return c;
@@ -96,24 +96,24 @@ Write `a - b mod 2^4096` in `res`.
 */
 uint32_t Hacl_Bignum4096_32_sub(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
   return c;
@@ -131,51 +131,51 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum4096_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i);
   }
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
   uint32_t c1 = c;
   uint32_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -195,52 +195,52 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum4096_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i);
   }
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
   uint32_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint32_t c2 = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t c2 = 0U - c00;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -257,7 +257,7 @@ Write `a * b` in `res`.
 void Hacl_Bignum4096_32_mul(uint32_t *a, uint32_t *b, uint32_t *res)
 {
   uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32((uint32_t)128U, a, b, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(128U, a, b, tmp, res);
 }
 
 /**
@@ -269,16 +269,16 @@ Write `a * a` in `res`.
 void Hacl_Bignum4096_32_sqr(uint32_t *a, uint32_t *res)
 {
   uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32((uint32_t)128U, a, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(128U, a, tmp, res);
 }
 
 static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
-  uint32_t i = nBits / (uint32_t)32U;
-  uint32_t j = nBits % (uint32_t)32U;
-  res[i] = res[i] | (uint32_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)8192U - nBits; i0++)
+  memset(res, 0U, 128U * sizeof (uint32_t));
+  uint32_t i = nBits / 32U;
+  uint32_t j = nBits % 32U;
+  res[i] = res[i] | 1U << j;
+  for (uint32_t i0 = 0U; i0 < 8192U - nBits; i0++)
   {
     Hacl_Bignum4096_32_add_mod(n, res, res, res);
   }
@@ -286,59 +286,59 @@ static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 
 static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < 128U; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)128U + i0;
-    uint32_t res_j = c[(uint32_t)128U + i0];
+    uint32_t *resb = c + 128U + i0;
+    uint32_t res_j = c[128U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(res, c + 128U, 128U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
-  uint32_t c1 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c1 = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i);
   }
   uint32_t c10 = c1;
   uint32_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -346,49 +346,56 @@ static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *
   }
 }
 
+static inline void to(uint32_t *n, uint32_t nInv, uint32_t *r2, uint32_t *a, uint32_t *aM)
+{
+  uint32_t c[256U] = { 0U };
+  Hacl_Bignum4096_32_mul(a, r2, c);
+  reduction(n, nInv, c, aM);
+}
+
 static inline void from(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *a)
 {
   uint32_t tmp[256U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(tmp, aM, 128U * sizeof (uint32_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < 128U; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)128U + i0;
-    uint32_t res_j = c[(uint32_t)128U + i0];
+    uint32_t *resb = c + 128U + i0;
+    uint32_t res_j = c[128U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(res, c + 128U, 128U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
   uint32_t c1 = Hacl_Bignum4096_32_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (m & tmp[i]) | (~m & res[i]);
@@ -400,16 +407,14 @@ static inline void
 amont_mul(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *bM, uint32_t *resM)
 {
   uint32_t c[256U] = { 0U };
-  uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32((uint32_t)128U, aM, bM, tmp, c);
+  Hacl_Bignum4096_32_mul(aM, bM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
 static inline void amont_sqr(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *resM)
 {
   uint32_t c[256U] = { 0U };
-  uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32((uint32_t)128U, aM, tmp, c);
+  Hacl_Bignum4096_32_sqr(aM, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -418,49 +423,9 @@ bn_slow_precomp(uint32_t *n, uint32_t mu, uint32_t *r2, uint32_t *a, uint32_t *r
 {
   uint32_t a_mod[128U] = { 0U };
   uint32_t a1[256U] = { 0U };
-  memcpy(a1, a, (uint32_t)256U * sizeof (uint32_t));
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++)
-  {
-    uint32_t qj = mu * a1[i0];
-    uint32_t *res_j0 = a1 + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
-    {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i);
-    }
-    uint32_t r = c;
-    uint32_t c1 = r;
-    uint32_t *resb = a1 + (uint32_t)128U + i0;
-    uint32_t res_j = a1[(uint32_t)128U + i0];
-    c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c1, res_j, resb);
-  }
-  memcpy(a_mod, a1 + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t));
-  uint32_t c00 = c0;
-  uint32_t tmp[128U] = { 0U };
-  uint32_t c1 = Hacl_Bignum4096_32_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
-  {
-    uint32_t *os = a_mod;
-    uint32_t x = (m & tmp[i]) | (~m & a_mod[i]);
-    os[i] = x;
-  }
-  uint32_t c[256U] = { 0U };
-  Hacl_Bignum4096_32_mul(a_mod, r2, c);
-  reduction(n, mu, c, res);
+  memcpy(a1, a, 256U * sizeof (uint32_t));
+  areduction(n, mu, a1, a_mod);
+  to(n, mu, r2, a_mod, res);
 }
 
 /**
@@ -477,21 +442,21 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum4096_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[128U] = { 0U };
-  memset(one, 0U, (uint32_t)128U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  memset(one, 0U, 128U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc;
   uint32_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t r2[128U] = { 0U };
     precompr2(nBits, n, r2);
@@ -500,65 +465,65 @@ bool Hacl_Bignum4096_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b)
 {
   uint32_t one[128U] = { 0U };
-  memset(one, 0U, (uint32_t)128U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  memset(one, 0U, 128U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m10 = acc0;
   uint32_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t m1;
-  if (bBits < (uint32_t)32U * bLen)
+  if (bBits < 32U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), bLen);
     uint32_t *b2 = (uint32_t *)alloca(bLen * sizeof (uint32_t));
     memset(b2, 0U, bLen * sizeof (uint32_t));
-    uint32_t i0 = bBits / (uint32_t)32U;
-    uint32_t j = bBits % (uint32_t)32U;
-    b2[i0] = b2[i0] | (uint32_t)1U << j;
-    uint32_t acc = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 32U;
+    uint32_t j = bBits % 32U;
+    b2[i0] = b2[i0] | 1U << j;
+    uint32_t acc = 0U;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]);
       uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
     }
     uint32_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint32_t)0xFFFFFFFFU;
+    m1 = 0xFFFFFFFFU;
   }
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t m = m1 & m2;
@@ -576,26 +541,24 @@ exp_vartime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[128U] = { 0U };
-    uint32_t c[256U] = { 0U };
-    Hacl_Bignum4096_32_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint32_t resM[128U] = { 0U };
     uint32_t ctx[256U] = { 0U };
-    memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
+    memcpy(ctx, n, 128U * sizeof (uint32_t));
+    memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -603,86 +566,76 @@ exp_vartime_precomp(
       uint32_t *ctx_n0 = ctx;
       amont_sqr(ctx_n0, mu, aM, aM);
     }
-    uint32_t tmp[256U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)128U * sizeof (uint32_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint32_t aM[128U] = { 0U };
-  uint32_t c[256U] = { 0U };
-  Hacl_Bignum4096_32_mul(a, r2, c);
-  reduction(n, mu, c, aM);
+  to(n, mu, r2, a, aM);
   uint32_t resM[128U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[256U] = { 0U };
-  memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(ctx, n, 128U * sizeof (uint32_t));
+  memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
   uint32_t table[2048U] = { 0U };
   uint32_t tmp[128U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)128U;
+  uint32_t *t1 = table + 128U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)128U;
+  uint32_t *ctx_r20 = ctx + 128U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(t1, aM, 128U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)128U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 128U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U;
+    memcpy(table + (2U * i + 2U) * 128U, tmp, 128U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 128U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 128U, tmp, 128U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
     uint32_t bits_l32 = bits_c;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)128U;
-    memcpy(resM, (uint32_t *)a_bits_l, (uint32_t)128U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 128U;
+    memcpy(resM, (uint32_t *)a_bits_l, 128U * sizeof (uint32_t));
   }
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[128U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
     uint32_t bits_l32 = bits_l;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)128U;
-    memcpy(tmp0, (uint32_t *)a_bits_l, (uint32_t)128U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 128U;
+    memcpy(tmp0, (uint32_t *)a_bits_l, 128U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint32_t tmp1[256U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)128U * sizeof (uint32_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -696,30 +649,28 @@ exp_consttime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[128U] = { 0U };
-    uint32_t c[256U] = { 0U };
-    Hacl_Bignum4096_32_mul(a, r2, c);
-    reduction(n, mu, c, aM);
+    to(n, mu, r2, a, aM);
     uint32_t resM[128U] = { 0U };
     uint32_t ctx[256U] = { 0U };
-    memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    memcpy(ctx, n, 128U * sizeof (uint32_t));
+    memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+      for (uint32_t i = 0U; i < 128U; i++)
       {
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -730,70 +681,62 @@ exp_consttime_precomp(
       sw = bit;
     }
     uint32_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+    for (uint32_t i = 0U; i < 128U; i++)
     {
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
-    uint32_t tmp[256U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)128U * sizeof (uint32_t));
-    reduction(n, mu, tmp, res);
+    from(n, mu, resM, res);
     return;
   }
   uint32_t aM[128U] = { 0U };
-  uint32_t c0[256U] = { 0U };
-  Hacl_Bignum4096_32_mul(a, r2, c0);
-  reduction(n, mu, c0, aM);
+  to(n, mu, r2, a, aM);
   uint32_t resM[128U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[256U] = { 0U };
-  memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(ctx, n, 128U * sizeof (uint32_t));
+  memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
   uint32_t table[2048U] = { 0U };
   uint32_t tmp[128U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)128U;
+  uint32_t *t1 = table + 128U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)128U;
+  uint32_t *ctx_r20 = ctx + 128U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(t1, aM, 128U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)128U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 128U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U;
+    memcpy(table + (2U * i + 2U) * 128U, tmp, 128U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 128U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 128U, tmp, 128U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint32_t *)table, (uint32_t)128U * sizeof (uint32_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+    memcpy(resM, (uint32_t *)table, 128U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)128U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 128U;
+      for (uint32_t i = 0U; i < 128U; i++)
       {
         uint32_t *os = resM;
         uint32_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -803,28 +746,28 @@ exp_consttime_precomp(
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[128U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint32_t *)table, (uint32_t)128U * sizeof (uint32_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
+    memcpy(tmp0, (uint32_t *)table, 128U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)128U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 128U;
+      for (uint32_t i = 0U; i < 128U; i++)
       {
         uint32_t *os = tmp0;
         uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -833,9 +776,7 @@ exp_consttime_precomp(
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
-  uint32_t tmp1[256U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)128U * sizeof (uint32_t));
-  reduction(n, mu, tmp1, res);
+  from(n, mu, resM, res);
 }
 
 static inline void
@@ -900,16 +841,16 @@ Hacl_Bignum4096_32_mod_exp_vartime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -942,16 +883,16 @@ Hacl_Bignum4096_32_mod_exp_consttime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -972,22 +913,22 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[128U] = { 0U };
-  memset(one, 0U, (uint32_t)128U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  memset(one, 0U, 128U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc0;
   uint32_t m00 = m0 & m1;
   uint32_t bn_zero[128U] = { 0U };
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -995,55 +936,55 @@ bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t
   uint32_t mask1 = mask;
   uint32_t res10 = mask1;
   uint32_t m10 = res10;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t n2[128U] = { 0U };
-    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2);
-    uint32_t *a1 = n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2);
+    uint32_t *a1 = n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)31U; i++)
+    for (uint32_t i = 0U; i < 31U; i++)
     {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
     KRML_MAYBE_FOR3(i,
-      (uint32_t)124U,
-      (uint32_t)127U,
-      (uint32_t)1U,
+      124U,
+      127U,
+      1U,
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
     uint32_t c1 = c;
     uint32_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)4096U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 4096U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 
@@ -1067,16 +1008,16 @@ Heap-allocate and initialize a montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum4096_32_mont_ctx_init(uint32_t *n)
 {
-  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint32_t));
-  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint32_t));
+  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC(128U, sizeof (uint32_t));
+  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC(128U, sizeof (uint32_t));
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)128U * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
+  memcpy(n11, n, 128U * sizeof (uint32_t));
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
   precompr2(nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
-  res = { .len = (uint32_t)128U, .n = n11, .mu = mu, .r2 = r21 };
+  res = { .len = 128U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *)KRML_HOST_MALLOC(sizeof (
@@ -1204,36 +1145,36 @@ Hacl_Bignum4096_32_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k;
   uint32_t n2[128U] = { 0U };
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
-  uint32_t *a1 = k1.n + (uint32_t)1U;
-  uint32_t *res1 = n2 + (uint32_t)1U;
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
+  uint32_t *a1 = k1.n + 1U;
+  uint32_t *res1 = n2 + 1U;
   uint32_t c = c0;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)31U; i++)
+  for (uint32_t i = 0U; i < 31U; i++)
   {
-    uint32_t t1 = a1[(uint32_t)4U * i];
-    uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-    uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-    uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-    uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+    uint32_t t1 = a1[4U * i];
+    uint32_t *res_i0 = res1 + 4U * i;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+    uint32_t t10 = a1[4U * i + 1U];
+    uint32_t *res_i1 = res1 + 4U * i + 1U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+    uint32_t t11 = a1[4U * i + 2U];
+    uint32_t *res_i2 = res1 + 4U * i + 2U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+    uint32_t t12 = a1[4U * i + 3U];
+    uint32_t *res_i = res1 + 4U * i + 3U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
   }
   KRML_MAYBE_FOR3(i,
-    (uint32_t)124U,
-    (uint32_t)127U,
-    (uint32_t)1U,
+    124U,
+    127U,
+    1U,
     uint32_t t1 = a1[i];
     uint32_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
   uint32_t c1 = c;
   uint32_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)4096U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 4096U, n2, res);
 }
 
 
@@ -1255,36 +1196,28 @@ Load a bid-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint32_t *os = res2;
-    uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U);
+    uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U);
     uint32_t x = u;
     os[i] = x;
   }
@@ -1304,36 +1237,28 @@ Load a little-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++)
   {
     uint32_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)4U;
+    uint8_t *bj = tmp + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r1 = u;
     uint32_t x = r1;
@@ -1351,10 +1276,10 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum4096_32_bn_to_bytes_be(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 128U; i++)
   {
-    store32_be(res + i * (uint32_t)4U, b[(uint32_t)128U - i - (uint32_t)1U]);
+    store32_be(res + i * 4U, b[128U - i - 1U]);
   }
 }
 
@@ -1367,10 +1292,10 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum4096_32_bn_to_bytes_le(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 128U; i++)
   {
-    store32_le(res + i * (uint32_t)4U, b[i]);
+    store32_le(res + i * 4U, b[i]);
   }
 }
 
@@ -1387,12 +1312,12 @@ Returns 2^32 - 1 if a < b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum4096_32_lt_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   return acc;
 }
@@ -1404,8 +1329,8 @@ Returns 2^32 - 1 if a = b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum4096_32_eq_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/msvc/Hacl_Bignum64.c b/src/msvc/Hacl_Bignum64.c
index 9e701c7b..e64b1a54 100644
--- a/src/msvc/Hacl_Bignum64.c
+++ b/src/msvc/Hacl_Bignum64.c
@@ -104,9 +104,9 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum64_mul(uint32_t len, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, b, tmp, res);
 }
 
@@ -118,9 +118,9 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum64_sqr(uint32_t len, uint64_t *a, uint64_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, a, tmp, res);
 }
 
@@ -141,61 +141,8 @@ bn_slow_precomp(
   uint64_t *a1 = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(a1, 0U, (len + len) * sizeof (uint64_t));
   memcpy(a1, a, (len + len) * sizeof (uint64_t));
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
-  {
-    uint64_t qj = mu * a1[i0];
-    uint64_t *res_j0 = a1 + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
-    {
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
-    }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
-    {
-      uint64_t a_i = n[i];
-      uint64_t *res_i = res_j0 + i;
-      c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i);
-    }
-    uint64_t r = c;
-    uint64_t c1 = r;
-    uint64_t *resb = a1 + len + i0;
-    uint64_t res_j = a1[len + i0];
-    c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb);
-  }
-  memcpy(a_mod, a1 + len, (len + len - len) * sizeof (uint64_t));
-  uint64_t c00 = c0;
-  KRML_CHECK_SIZE(sizeof (uint64_t), len);
-  uint64_t *tmp0 = (uint64_t *)alloca(len * sizeof (uint64_t));
-  memset(tmp0, 0U, len * sizeof (uint64_t));
-  uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len, a_mod, n, tmp0);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
-  {
-    uint64_t *os = a_mod;
-    uint64_t x = (m & tmp0[i]) | (~m & a_mod[i]);
-    os[i] = x;
-  }
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
-  uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
-  memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a_mod, r2, tmp, c);
-  Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, res);
+  Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64(len, n, mu, a1, a_mod);
+  Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a_mod, res);
 }
 
 /**
@@ -215,20 +162,20 @@ bool Hacl_Bignum64_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res)
   uint64_t *one = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   uint64_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t *r2 = (uint64_t *)alloca(len * sizeof (uint64_t));
@@ -241,7 +188,7 @@ bool Hacl_Bignum64_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res)
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -275,8 +222,8 @@ Hacl_Bignum64_mod_exp_vartime(
 )
 {
   uint64_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len, nBits, n, a, bBits, b, res);
   }
@@ -284,7 +231,7 @@ Hacl_Bignum64_mod_exp_vartime(
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -318,8 +265,8 @@ Hacl_Bignum64_mod_exp_consttime(
 )
 {
   uint64_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u64(len, nBits, n, a, bBits, b, res);
   }
@@ -327,7 +274,7 @@ Hacl_Bignum64_mod_exp_consttime(
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -352,23 +299,23 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a,
   uint64_t *one = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc0;
   uint64_t m00 = m0 & m1;
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *bn_zero = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(bn_zero, 0U, len * sizeof (uint64_t));
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -376,53 +323,48 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a,
   uint64_t mask1 = mask;
   uint64_t res10 = mask1;
   uint64_t m10 = res10;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t *n2 = (uint64_t *)alloca(len * sizeof (uint64_t));
     memset(n2, 0U, len * sizeof (uint64_t));
-    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2);
+    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2);
     uint64_t c1;
-    if ((uint32_t)1U < len)
+    if (1U < len)
     {
-      uint64_t *a1 = n + (uint32_t)1U;
-      uint64_t *res1 = n2 + (uint32_t)1U;
+      uint64_t *a1 = n + 1U;
+      uint64_t *res1 = n2 + 1U;
       uint64_t c = c0;
-      for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U; i++)
+      for (uint32_t i = 0U; i < (len - 1U) / 4U; i++)
       {
-        uint64_t t1 = a1[(uint32_t)4U * i];
-        uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-        uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-        uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-        uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-        uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-        uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-        uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+        uint64_t t1 = a1[4U * i];
+        uint64_t *res_i0 = res1 + 4U * i;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+        uint64_t t10 = a1[4U * i + 1U];
+        uint64_t *res_i1 = res1 + 4U * i + 1U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+        uint64_t t11 = a1[4U * i + 2U];
+        uint64_t *res_i2 = res1 + 4U * i + 2U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+        uint64_t t12 = a1[4U * i + 3U];
+        uint64_t *res_i = res1 + 4U * i + 3U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
       }
-      for
-      (uint32_t
-        i = (len - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-        i
-        < len - (uint32_t)1U;
-        i++)
+      for (uint32_t i = (len - 1U) / 4U * 4U; i < len - 1U; i++)
       {
         uint64_t t1 = a1[i];
         uint64_t *res_i = res1 + i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
       }
       uint64_t c10 = c;
       c1 = c10;
@@ -431,20 +373,14 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a,
     {
       c1 = c0;
     }
-    KRML_HOST_IGNORE(c1);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len,
-      nBits,
-      n,
-      a,
-      (uint32_t)64U * len,
-      n2,
-      res);
+    KRML_MAYBE_UNUSED_VAR(c1);
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len, nBits, n, a, 64U * len, n2, res);
   }
   else
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -476,7 +412,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint64_t));
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -631,38 +567,33 @@ Hacl_Bignum64_mod_inv_prime_vartime_precomp(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t *n2 = (uint64_t *)alloca(len1 * sizeof (uint64_t));
   memset(n2, 0U, len1 * sizeof (uint64_t));
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
   uint64_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint64_t *a1 = k1.n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t *a1 = k1.n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c10 = c;
     c1 = c10;
@@ -671,13 +602,13 @@ Hacl_Bignum64_mod_inv_prime_vartime_precomp(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(len1,
     k1.n,
     k1.mu,
     k1.r2,
     a,
-    (uint32_t)64U * len1,
+    64U * len1,
     n2,
     res);
 }
@@ -701,36 +632,28 @@ Load a big-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum64_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res2;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -750,36 +673,28 @@ Load a little-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum64_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++)
   {
     uint64_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)8U;
+    uint8_t *bj = tmp + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r1 = u;
     uint64_t x = r1;
@@ -796,14 +711,14 @@ Serialize a bignum into big-endian memory.
 */
 void Hacl_Bignum64_bn_to_bytes_be(uint32_t len, uint64_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store64_be(tmp + i * (uint32_t)8U, b[bnLen - i - (uint32_t)1U]);
+    store64_be(tmp + i * 8U, b[bnLen - i - 1U]);
   }
   memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t));
 }
@@ -816,14 +731,14 @@ Serialize a bignum into little-endian memory.
 */
 void Hacl_Bignum64_bn_to_bytes_le(uint32_t len, uint64_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store64_le(tmp + i * (uint32_t)8U, b[i]);
+    store64_le(tmp + i * 8U, b[i]);
   }
   memcpy(res, tmp, len * sizeof (uint8_t));
 }
@@ -841,12 +756,12 @@ Returns 2^64 - 1 if a < b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum64_lt_mask(uint32_t len, uint64_t *a, uint64_t *b)
 {
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   return acc;
 }
@@ -858,8 +773,8 @@ Returns 2^64 - 1 if a = b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum64_eq_mask(uint32_t len, uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/msvc/Hacl_Chacha20.c b/src/msvc/Hacl_Chacha20.c
index 8966e19e..38a5c373 100644
--- a/src/msvc/Hacl_Chacha20.c
+++ b/src/msvc/Hacl_Chacha20.c
@@ -28,7 +28,7 @@
 const
 uint32_t
 Hacl_Impl_Chacha20_Vec_chacha20_constants[4U] =
-  { (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
+  { 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U };
 
 static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
 {
@@ -37,7 +37,7 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std0 = st[d];
   uint32_t sta10 = sta + stb0;
   uint32_t std10 = std0 ^ sta10;
-  uint32_t std2 = std10 << (uint32_t)16U | std10 >> (uint32_t)16U;
+  uint32_t std2 = std10 << 16U | std10 >> 16U;
   st[a] = sta10;
   st[d] = std2;
   uint32_t sta0 = st[c];
@@ -45,7 +45,7 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std3 = st[b];
   uint32_t sta11 = sta0 + stb1;
   uint32_t std11 = std3 ^ sta11;
-  uint32_t std20 = std11 << (uint32_t)12U | std11 >> (uint32_t)20U;
+  uint32_t std20 = std11 << 12U | std11 >> 20U;
   st[c] = sta11;
   st[b] = std20;
   uint32_t sta2 = st[a];
@@ -53,7 +53,7 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std4 = st[d];
   uint32_t sta12 = sta2 + stb2;
   uint32_t std12 = std4 ^ sta12;
-  uint32_t std21 = std12 << (uint32_t)8U | std12 >> (uint32_t)24U;
+  uint32_t std21 = std12 << 8U | std12 >> 24U;
   st[a] = sta12;
   st[d] = std21;
   uint32_t sta3 = st[c];
@@ -61,21 +61,21 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std = st[b];
   uint32_t sta1 = sta3 + stb;
   uint32_t std1 = std ^ sta1;
-  uint32_t std22 = std1 << (uint32_t)7U | std1 >> (uint32_t)25U;
+  uint32_t std22 = std1 << 7U | std1 >> 25U;
   st[c] = sta1;
   st[b] = std22;
 }
 
 static inline void double_round(uint32_t *st)
 {
-  quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U);
-  quarter_round(st, (uint32_t)1U, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U);
-  quarter_round(st, (uint32_t)2U, (uint32_t)6U, (uint32_t)10U, (uint32_t)14U);
-  quarter_round(st, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U, (uint32_t)15U);
-  quarter_round(st, (uint32_t)0U, (uint32_t)5U, (uint32_t)10U, (uint32_t)15U);
-  quarter_round(st, (uint32_t)1U, (uint32_t)6U, (uint32_t)11U, (uint32_t)12U);
-  quarter_round(st, (uint32_t)2U, (uint32_t)7U, (uint32_t)8U, (uint32_t)13U);
-  quarter_round(st, (uint32_t)3U, (uint32_t)4U, (uint32_t)9U, (uint32_t)14U);
+  quarter_round(st, 0U, 4U, 8U, 12U);
+  quarter_round(st, 1U, 5U, 9U, 13U);
+  quarter_round(st, 2U, 6U, 10U, 14U);
+  quarter_round(st, 3U, 7U, 11U, 15U);
+  quarter_round(st, 0U, 5U, 10U, 15U);
+  quarter_round(st, 1U, 6U, 11U, 12U);
+  quarter_round(st, 2U, 7U, 8U, 13U);
+  quarter_round(st, 3U, 4U, 9U, 14U);
 }
 
 static inline void rounds(uint32_t *st)
@@ -94,14 +94,14 @@ static inline void rounds(uint32_t *st)
 
 static inline void chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t));
+  memcpy(k, ctx, 16U * sizeof (uint32_t));
   uint32_t ctr_u32 = ctr;
   k[12U] = k[12U] + ctr_u32;
   rounds(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = k;
     uint32_t x = k[i] + ctx[i];
     os[i] = x;);
@@ -110,35 +110,34 @@ static inline void chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 
 static const
 uint32_t
-chacha20_constants[4U] =
-  { (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
+chacha20_constants[4U] = { 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U };
 
 void Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n, uint32_t ctr)
 {
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx;
     uint32_t x = chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -151,27 +150,23 @@ static void chacha20_encrypt_block(uint32_t *ctx, uint8_t *out, uint32_t incr, u
   chacha20_core(k, ctx, incr);
   uint32_t bl[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = bl;
-    uint8_t *bj = text + i * (uint32_t)4U;
+    uint8_t *bj = text + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = bl;
     uint32_t x = bl[i] ^ k[i];
     os[i] = x;);
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    store32_le(out + i * (uint32_t)4U, bl[i]););
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(out + i * 4U, bl[i]););
 }
 
 static inline void
@@ -186,16 +181,16 @@ chacha20_encrypt_last(uint32_t *ctx, uint32_t len, uint8_t *out, uint32_t incr,
 void
 Hacl_Impl_Chacha20_chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text)
 {
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    chacha20_encrypt_block(ctx, out + i * (uint32_t)64U, i, text + i * (uint32_t)64U);
+    chacha20_encrypt_block(ctx, out + i * 64U, i, text + i * 64U);
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    chacha20_encrypt_last(ctx, rem, out + nb * (uint32_t)64U, nb, text + nb * (uint32_t)64U);
+    chacha20_encrypt_last(ctx, rem, out + nb * 64U, nb, text + nb * 64U);
   }
 }
 
diff --git a/src/msvc/Hacl_Chacha20Poly1305_128.c b/src/msvc/Hacl_Chacha20Poly1305_128.c
index 4cf2eae9..297f1c8f 100644
--- a/src/msvc/Hacl_Chacha20Poly1305_128.c
+++ b/src/msvc/Hacl_Chacha20Poly1305_128.c
@@ -32,56 +32,51 @@
 static inline void
 poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t *text)
 {
-  uint32_t n = len / (uint32_t)16U;
-  uint32_t r = len % (uint32_t)16U;
+  uint32_t n = len / 16U;
+  uint32_t r = len % 16U;
   uint8_t *blocks = text;
-  uint8_t *rem = text + n * (uint32_t)16U;
-  Lib_IntVector_Intrinsics_vec128 *pre0 = ctx + (uint32_t)5U;
+  uint8_t *rem = text + n * 16U;
+  Lib_IntVector_Intrinsics_vec128 *pre0 = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc0 = ctx;
-  uint32_t sz_block = (uint32_t)32U;
-  uint32_t len0 = n * (uint32_t)16U / sz_block * sz_block;
+  uint32_t sz_block = 32U;
+  uint32_t len0 = n * 16U / sz_block * sz_block;
   uint8_t *t00 = blocks;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)32U;
+    uint32_t bs = 32U;
     uint8_t *text0 = t00;
     Hacl_Impl_Poly1305_Field32xN_128_load_acc2(acc0, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t00 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
       Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(block);
-      Lib_IntVector_Intrinsics_vec128
-      b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + (uint32_t)16U);
+      Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + 16U);
       Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       f00 =
         Lib_IntVector_Intrinsics_vec128_and(lo,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f15 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)26U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 26U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f25 =
-        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)52U),
+        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 52U),
           Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi,
-              Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-            (uint32_t)12U));
+              Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+            12U));
       Lib_IntVector_Intrinsics_vec128
       f30 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi,
-            (uint32_t)14U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-      Lib_IntVector_Intrinsics_vec128
-      f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U);
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 14U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+      Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 40U);
       Lib_IntVector_Intrinsics_vec128 f0 = f00;
       Lib_IntVector_Intrinsics_vec128 f1 = f15;
       Lib_IntVector_Intrinsics_vec128 f2 = f25;
@@ -92,12 +87,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
       e[2U] = f2;
       e[3U] = f3;
       e[4U] = f41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
       Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec128 *rn = pre0 + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec128 *rn5 = pre0 + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec128 *rn = pre0 + 10U;
+      Lib_IntVector_Intrinsics_vec128 *rn5 = pre0 + 15U;
       Lib_IntVector_Intrinsics_vec128 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec128 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec128 r2 = rn[2U];
@@ -202,37 +197,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
       Lib_IntVector_Intrinsics_vec128 t2 = a24;
       Lib_IntVector_Intrinsics_vec128 t3 = a34;
       Lib_IntVector_Intrinsics_vec128 t4 = a44;
-      Lib_IntVector_Intrinsics_vec128
-      mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec128
-      z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec128
-      z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
       Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec128
-      z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec128
-      z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec128 o00 = x02;
@@ -268,43 +254,39 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     }
     Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(acc0, pre0);
   }
-  uint32_t len1 = n * (uint32_t)16U - len0;
+  uint32_t len1 = n * 16U - len0;
   uint8_t *t10 = blocks + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem1 = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem1 = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t10 + i * (uint32_t)16U;
+    uint8_t *block = t10 + i * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -315,12 +297,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
     Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
     Lib_IntVector_Intrinsics_vec128 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec128 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r1[2U];
@@ -435,37 +417,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -479,41 +452,37 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     acc0[3U] = o3;
     acc0[4U] = o4;
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *last = t10 + nb * (uint32_t)16U;
+    uint8_t *last = t10 + nb * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem1 * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -524,12 +493,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem1 * 8U % 26U;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
-    Lib_IntVector_Intrinsics_vec128 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
-    e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec128 fi = e[rem1 * 8U / 26U];
+    e[rem1 * 8U / 26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
     Lib_IntVector_Intrinsics_vec128 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec128 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r1[2U];
@@ -644,37 +613,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -690,40 +650,36 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
   }
   uint8_t tmp[16U] = { 0U };
   memcpy(tmp, rem, r * sizeof (uint8_t));
-  if (r > (uint32_t)0U)
+  if (r > 0U)
   {
-    Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
     Lib_IntVector_Intrinsics_vec128 *acc = ctx;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -734,12 +690,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
     Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
     Lib_IntVector_Intrinsics_vec128 *r1 = pre;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec128 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r1[2U];
@@ -854,37 +810,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -914,48 +861,44 @@ poly1305_do_128(
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[25U] KRML_POST_ALIGN(16) = { 0U };
   uint8_t block[16U] = { 0U };
   Hacl_Poly1305_128_poly1305_init(ctx, k);
-  if (aadlen != (uint32_t)0U)
+  if (aadlen != 0U)
   {
     poly1305_padded_128(ctx, aadlen, aad);
   }
-  if (mlen != (uint32_t)0U)
+  if (mlen != 0U)
   {
     poly1305_padded_128(ctx, mlen, m);
   }
   store64_le(block, (uint64_t)aadlen);
-  store64_le(block + (uint32_t)8U, (uint64_t)mlen);
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  store64_le(block + 8U, (uint64_t)mlen);
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
   uint64_t u0 = load64_le(block);
   uint64_t lo = u0;
-  uint64_t u = load64_le(block + (uint32_t)8U);
+  uint64_t u = load64_le(block + 8U);
   uint64_t hi = u;
   Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
   Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
   Lib_IntVector_Intrinsics_vec128
   f010 =
     Lib_IntVector_Intrinsics_vec128_and(f0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f110 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
   Lib_IntVector_Intrinsics_vec128 f01 = f010;
   Lib_IntVector_Intrinsics_vec128 f111 = f110;
   Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -966,12 +909,12 @@ poly1305_do_128(
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
   Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
   Lib_IntVector_Intrinsics_vec128 *r = pre;
-  Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
   Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -1086,37 +1029,28 @@ poly1305_do_128(
   Lib_IntVector_Intrinsics_vec128 t2 = a26;
   Lib_IntVector_Intrinsics_vec128 t3 = a36;
   Lib_IntVector_Intrinsics_vec128 t4 = a46;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -1160,9 +1094,9 @@ Hacl_Chacha20Poly1305_128_aead_encrypt(
   uint8_t *mac
 )
 {
-  Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, cipher, m, k, n, (uint32_t)1U);
+  Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, cipher, m, k, n, 1U);
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
+  Hacl_Chacha20_Vec128_chacha20_encrypt_128(64U, tmp, tmp, k, n, 0U);
   uint8_t *key = tmp;
   poly1305_do_128(key, aadlen, aad, mlen, cipher, mac);
 }
@@ -1202,22 +1136,22 @@ Hacl_Chacha20Poly1305_128_aead_decrypt(
 {
   uint8_t computed_mac[16U] = { 0U };
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
+  Hacl_Chacha20_Vec128_chacha20_encrypt_128(64U, tmp, tmp, k, n, 0U);
   uint8_t *key = tmp;
   poly1305_do_128(key, aadlen, aad, mlen, cipher, computed_mac);
-  uint8_t res = (uint8_t)255U;
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
-    res = uu____0 & res;);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
-    Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, m, cipher, k, n, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, m, cipher, k, n, 1U);
+    return 0U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_Chacha20Poly1305_256.c b/src/msvc/Hacl_Chacha20Poly1305_256.c
index c3dfec03..6a278daa 100644
--- a/src/msvc/Hacl_Chacha20Poly1305_256.c
+++ b/src/msvc/Hacl_Chacha20Poly1305_256.c
@@ -32,58 +32,52 @@
 static inline void
 poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t *text)
 {
-  uint32_t n = len / (uint32_t)16U;
-  uint32_t r = len % (uint32_t)16U;
+  uint32_t n = len / 16U;
+  uint32_t r = len % 16U;
   uint8_t *blocks = text;
-  uint8_t *rem = text + n * (uint32_t)16U;
-  Lib_IntVector_Intrinsics_vec256 *pre0 = ctx + (uint32_t)5U;
+  uint8_t *rem = text + n * 16U;
+  Lib_IntVector_Intrinsics_vec256 *pre0 = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc0 = ctx;
-  uint32_t sz_block = (uint32_t)64U;
-  uint32_t len0 = n * (uint32_t)16U / sz_block * sz_block;
+  uint32_t sz_block = 64U;
+  uint32_t len0 = n * 16U / sz_block * sz_block;
   uint8_t *t00 = blocks;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)64U;
+    uint32_t bs = 64U;
     uint8_t *text0 = t00;
     Hacl_Impl_Poly1305_Field32xN_256_load_acc4(acc0, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t00 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
       Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block);
+      Lib_IntVector_Intrinsics_vec256 hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + 32U);
       Lib_IntVector_Intrinsics_vec256
-      hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U);
-      Lib_IntVector_Intrinsics_vec256
-      mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
+      mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
       Lib_IntVector_Intrinsics_vec256
       m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
       Lib_IntVector_Intrinsics_vec256
       m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
-      Lib_IntVector_Intrinsics_vec256
-      m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U);
-      Lib_IntVector_Intrinsics_vec256
-      m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U);
+      Lib_IntVector_Intrinsics_vec256 m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, 48U);
+      Lib_IntVector_Intrinsics_vec256 m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, 48U);
       Lib_IntVector_Intrinsics_vec256
       m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t010 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t30 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
-      Lib_IntVector_Intrinsics_vec256
-      t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)4U);
+      Lib_IntVector_Intrinsics_vec256 t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 4U);
       Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask260);
       Lib_IntVector_Intrinsics_vec256
-      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, (uint32_t)26U);
+      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, 26U);
       Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t10, mask260);
       Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)30U);
+      Lib_IntVector_Intrinsics_vec256 t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 30U);
       Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256 o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, 40U);
       Lib_IntVector_Intrinsics_vec256 o00 = o5;
       Lib_IntVector_Intrinsics_vec256 o11 = o10;
       Lib_IntVector_Intrinsics_vec256 o21 = o20;
@@ -94,12 +88,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
       e[2U] = o21;
       e[3U] = o31;
       e[4U] = o41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
       Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec256 *rn = pre0 + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec256 *rn5 = pre0 + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec256 *rn = pre0 + 10U;
+      Lib_IntVector_Intrinsics_vec256 *rn5 = pre0 + 15U;
       Lib_IntVector_Intrinsics_vec256 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec256 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec256 r2 = rn[2U];
@@ -204,37 +198,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
       Lib_IntVector_Intrinsics_vec256 t2 = a24;
       Lib_IntVector_Intrinsics_vec256 t3 = a34;
       Lib_IntVector_Intrinsics_vec256 t4 = a44;
-      Lib_IntVector_Intrinsics_vec256
-      mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec256
-      z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec256
-      z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
       Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec256
-      z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec256
-      z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec256 o01 = x02;
@@ -270,43 +255,39 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     }
     Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(acc0, pre0);
   }
-  uint32_t len1 = n * (uint32_t)16U - len0;
+  uint32_t len1 = n * 16U - len0;
   uint8_t *t10 = blocks + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem1 = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem1 = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t10 + i * (uint32_t)16U;
+    uint8_t *block = t10 + i * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -317,12 +298,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
     Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
     Lib_IntVector_Intrinsics_vec256 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec256 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r1[2U];
@@ -437,37 +418,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -481,41 +453,37 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     acc0[3U] = o3;
     acc0[4U] = o4;
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *last = t10 + nb * (uint32_t)16U;
+    uint8_t *last = t10 + nb * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem1 * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -526,12 +494,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem1 * 8U % 26U;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
-    Lib_IntVector_Intrinsics_vec256 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
-    e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec256 fi = e[rem1 * 8U / 26U];
+    e[rem1 * 8U / 26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
     Lib_IntVector_Intrinsics_vec256 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec256 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r1[2U];
@@ -646,37 +614,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -692,40 +651,36 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
   }
   uint8_t tmp[16U] = { 0U };
   memcpy(tmp, rem, r * sizeof (uint8_t));
-  if (r > (uint32_t)0U)
+  if (r > 0U)
   {
-    Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
     Lib_IntVector_Intrinsics_vec256 *acc = ctx;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -736,12 +691,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
     Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
     Lib_IntVector_Intrinsics_vec256 *r1 = pre;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec256 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r1[2U];
@@ -856,37 +811,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -916,48 +862,44 @@ poly1305_do_256(
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[25U] KRML_POST_ALIGN(32) = { 0U };
   uint8_t block[16U] = { 0U };
   Hacl_Poly1305_256_poly1305_init(ctx, k);
-  if (aadlen != (uint32_t)0U)
+  if (aadlen != 0U)
   {
     poly1305_padded_256(ctx, aadlen, aad);
   }
-  if (mlen != (uint32_t)0U)
+  if (mlen != 0U)
   {
     poly1305_padded_256(ctx, mlen, m);
   }
   store64_le(block, (uint64_t)aadlen);
-  store64_le(block + (uint32_t)8U, (uint64_t)mlen);
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  store64_le(block + 8U, (uint64_t)mlen);
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
   uint64_t u0 = load64_le(block);
   uint64_t lo = u0;
-  uint64_t u = load64_le(block + (uint32_t)8U);
+  uint64_t u = load64_le(block + 8U);
   uint64_t hi = u;
   Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
   Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
   Lib_IntVector_Intrinsics_vec256
   f010 =
     Lib_IntVector_Intrinsics_vec256_and(f0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f110 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f20 =
-    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
       Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-          Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec256
   f30 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
   Lib_IntVector_Intrinsics_vec256 f01 = f010;
   Lib_IntVector_Intrinsics_vec256 f111 = f110;
   Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -968,12 +910,12 @@ poly1305_do_256(
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
   Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
   Lib_IntVector_Intrinsics_vec256 *r = pre;
-  Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
   Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1088,37 +1030,28 @@ poly1305_do_256(
   Lib_IntVector_Intrinsics_vec256 t2 = a26;
   Lib_IntVector_Intrinsics_vec256 t3 = a36;
   Lib_IntVector_Intrinsics_vec256 t4 = a46;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1162,9 +1095,9 @@ Hacl_Chacha20Poly1305_256_aead_encrypt(
   uint8_t *mac
 )
 {
-  Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, cipher, m, k, n, (uint32_t)1U);
+  Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, cipher, m, k, n, 1U);
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
+  Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, tmp, tmp, k, n, 0U);
   uint8_t *key = tmp;
   poly1305_do_256(key, aadlen, aad, mlen, cipher, mac);
 }
@@ -1204,22 +1137,22 @@ Hacl_Chacha20Poly1305_256_aead_decrypt(
 {
   uint8_t computed_mac[16U] = { 0U };
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
+  Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, tmp, tmp, k, n, 0U);
   uint8_t *key = tmp;
   poly1305_do_256(key, aadlen, aad, mlen, cipher, computed_mac);
-  uint8_t res = (uint8_t)255U;
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
-    res = uu____0 & res;);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
-    Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, m, cipher, k, n, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, m, cipher, k, n, 1U);
+    return 0U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_Chacha20Poly1305_32.c b/src/msvc/Hacl_Chacha20Poly1305_32.c
index 179af485..211cf619 100644
--- a/src/msvc/Hacl_Chacha20Poly1305_32.c
+++ b/src/msvc/Hacl_Chacha20Poly1305_32.c
@@ -29,29 +29,29 @@
 
 static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
 {
-  uint32_t n = len / (uint32_t)16U;
-  uint32_t r = len % (uint32_t)16U;
+  uint32_t n = len / 16U;
+  uint32_t r = len % 16U;
   uint8_t *blocks = text;
-  uint8_t *rem = text + n * (uint32_t)16U;
-  uint64_t *pre0 = ctx + (uint32_t)5U;
+  uint8_t *rem = text + n * 16U;
+  uint64_t *pre0 = ctx + 5U;
   uint64_t *acc0 = ctx;
-  uint32_t nb = n * (uint32_t)16U / (uint32_t)16U;
-  uint32_t rem1 = n * (uint32_t)16U % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = n * 16U / 16U;
+  uint32_t rem1 = n * 16U % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = blocks + i * (uint32_t)16U;
+    uint8_t *block = blocks + i * 16U;
     uint64_t e[5U] = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -62,12 +62,12 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     uint64_t mask = b;
     uint64_t f4 = e[4U];
     e[4U] = f4 | mask;
     uint64_t *r1 = pre0;
-    uint64_t *r5 = pre0 + (uint32_t)5U;
+    uint64_t *r5 = pre0 + 5U;
     uint64_t r0 = r1[0U];
     uint64_t r11 = r1[1U];
     uint64_t r2 = r1[2U];
@@ -122,28 +122,28 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -157,23 +157,23 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     acc0[3U] = o3;
     acc0[4U] = o4;
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *last = blocks + nb * (uint32_t)16U;
+    uint8_t *last = blocks + nb * 16U;
     uint64_t e[5U] = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem1 * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -184,12 +184,12 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem1 * 8U % 26U;
     uint64_t mask = b;
-    uint64_t fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
-    e[rem1 * (uint32_t)8U / (uint32_t)26U] = fi | mask;
+    uint64_t fi = e[rem1 * 8U / 26U];
+    e[rem1 * 8U / 26U] = fi | mask;
     uint64_t *r1 = pre0;
-    uint64_t *r5 = pre0 + (uint32_t)5U;
+    uint64_t *r5 = pre0 + 5U;
     uint64_t r0 = r1[0U];
     uint64_t r11 = r1[1U];
     uint64_t r2 = r1[2U];
@@ -244,28 +244,28 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -281,22 +281,22 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
   }
   uint8_t tmp[16U] = { 0U };
   memcpy(tmp, rem, r * sizeof (uint8_t));
-  if (r > (uint32_t)0U)
+  if (r > 0U)
   {
-    uint64_t *pre = ctx + (uint32_t)5U;
+    uint64_t *pre = ctx + 5U;
     uint64_t *acc = ctx;
     uint64_t e[5U] = { 0U };
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -307,12 +307,12 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     uint64_t mask = b;
     uint64_t f4 = e[4U];
     e[4U] = f4 | mask;
     uint64_t *r1 = pre;
-    uint64_t *r5 = pre + (uint32_t)5U;
+    uint64_t *r5 = pre + 5U;
     uint64_t r0 = r1[0U];
     uint64_t r11 = r1[1U];
     uint64_t r2 = r1[2U];
@@ -367,28 +367,28 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -418,30 +418,30 @@ poly1305_do_32(
   uint64_t ctx[25U] = { 0U };
   uint8_t block[16U] = { 0U };
   Hacl_Poly1305_32_poly1305_init(ctx, k);
-  if (aadlen != (uint32_t)0U)
+  if (aadlen != 0U)
   {
     poly1305_padded_32(ctx, aadlen, aad);
   }
-  if (mlen != (uint32_t)0U)
+  if (mlen != 0U)
   {
     poly1305_padded_32(ctx, mlen, m);
   }
   store64_le(block, (uint64_t)aadlen);
-  store64_le(block + (uint32_t)8U, (uint64_t)mlen);
-  uint64_t *pre = ctx + (uint32_t)5U;
+  store64_le(block + 8U, (uint64_t)mlen);
+  uint64_t *pre = ctx + 5U;
   uint64_t *acc = ctx;
   uint64_t e[5U] = { 0U };
   uint64_t u0 = load64_le(block);
   uint64_t lo = u0;
-  uint64_t u = load64_le(block + (uint32_t)8U);
+  uint64_t u = load64_le(block + 8U);
   uint64_t hi = u;
   uint64_t f0 = lo;
   uint64_t f1 = hi;
-  uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-  uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-  uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-  uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-  uint64_t f40 = f1 >> (uint32_t)40U;
+  uint64_t f010 = f0 & 0x3ffffffULL;
+  uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+  uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+  uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+  uint64_t f40 = f1 >> 40U;
   uint64_t f01 = f010;
   uint64_t f111 = f110;
   uint64_t f2 = f20;
@@ -452,12 +452,12 @@ poly1305_do_32(
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   uint64_t mask = b;
   uint64_t f4 = e[4U];
   e[4U] = f4 | mask;
   uint64_t *r = pre;
-  uint64_t *r5 = pre + (uint32_t)5U;
+  uint64_t *r5 = pre + 5U;
   uint64_t r0 = r[0U];
   uint64_t r1 = r[1U];
   uint64_t r2 = r[2U];
@@ -512,28 +512,28 @@ poly1305_do_32(
   uint64_t t2 = a26;
   uint64_t t3 = a36;
   uint64_t t4 = a46;
-  uint64_t mask26 = (uint64_t)0x3ffffffU;
-  uint64_t z0 = t0 >> (uint32_t)26U;
-  uint64_t z1 = t3 >> (uint32_t)26U;
+  uint64_t mask26 = 0x3ffffffULL;
+  uint64_t z0 = t0 >> 26U;
+  uint64_t z1 = t3 >> 26U;
   uint64_t x0 = t0 & mask26;
   uint64_t x3 = t3 & mask26;
   uint64_t x1 = t1 + z0;
   uint64_t x4 = t4 + z1;
-  uint64_t z01 = x1 >> (uint32_t)26U;
-  uint64_t z11 = x4 >> (uint32_t)26U;
-  uint64_t t = z11 << (uint32_t)2U;
+  uint64_t z01 = x1 >> 26U;
+  uint64_t z11 = x4 >> 26U;
+  uint64_t t = z11 << 2U;
   uint64_t z12 = z11 + t;
   uint64_t x11 = x1 & mask26;
   uint64_t x41 = x4 & mask26;
   uint64_t x2 = t2 + z01;
   uint64_t x01 = x0 + z12;
-  uint64_t z02 = x2 >> (uint32_t)26U;
-  uint64_t z13 = x01 >> (uint32_t)26U;
+  uint64_t z02 = x2 >> 26U;
+  uint64_t z13 = x01 >> 26U;
   uint64_t x21 = x2 & mask26;
   uint64_t x02 = x01 & mask26;
   uint64_t x31 = x3 + z02;
   uint64_t x12 = x11 + z13;
-  uint64_t z03 = x31 >> (uint32_t)26U;
+  uint64_t z03 = x31 >> 26U;
   uint64_t x32 = x31 & mask26;
   uint64_t x42 = x41 + z03;
   uint64_t o0 = x02;
@@ -577,9 +577,9 @@ Hacl_Chacha20Poly1305_32_aead_encrypt(
   uint8_t *mac
 )
 {
-  Hacl_Chacha20_chacha20_encrypt(mlen, cipher, m, k, n, (uint32_t)1U);
+  Hacl_Chacha20_chacha20_encrypt(mlen, cipher, m, k, n, 1U);
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
+  Hacl_Chacha20_chacha20_encrypt(64U, tmp, tmp, k, n, 0U);
   uint8_t *key = tmp;
   poly1305_do_32(key, aadlen, aad, mlen, cipher, mac);
 }
@@ -619,22 +619,22 @@ Hacl_Chacha20Poly1305_32_aead_decrypt(
 {
   uint8_t computed_mac[16U] = { 0U };
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
+  Hacl_Chacha20_chacha20_encrypt(64U, tmp, tmp, k, n, 0U);
   uint8_t *key = tmp;
   poly1305_do_32(key, aadlen, aad, mlen, cipher, computed_mac);
-  uint8_t res = (uint8_t)255U;
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
-    res = uu____0 & res;);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
-    Hacl_Chacha20_chacha20_encrypt(mlen, m, cipher, k, n, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Chacha20_chacha20_encrypt(mlen, m, cipher, k, n, 1U);
+    return 0U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_Chacha20_Vec128.c b/src/msvc/Hacl_Chacha20_Vec128.c
index 1e0c4ec1..deab1dfc 100644
--- a/src/msvc/Hacl_Chacha20_Vec128.c
+++ b/src/msvc/Hacl_Chacha20_Vec128.c
@@ -32,100 +32,100 @@ static inline void double_round_128(Lib_IntVector_Intrinsics_vec128 *st)
 {
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std0 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std0, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std0, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std1 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std1, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std1, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std2 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std2, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std2, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std3 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std3, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std3, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std4 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std4, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std4, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std5 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std5, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std5, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std6 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std6, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std6, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std7 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std7, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std7, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std8 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std8, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std8, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std9 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std9, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std9, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std10 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std10, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std10, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std11 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std11, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std11, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std12 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std12, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std12, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std13 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std13, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std13, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std14 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std14, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std14, 7U);
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std15 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std15, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std15, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std16 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std16, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std16, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std17 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std17, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std17, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std18 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std18, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std18, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std19 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std19, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std19, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std20 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std20, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std20, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std21 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std21, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std21, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std22 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std22, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std22, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std23 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std23, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std23, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std24 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std24, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std24, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std25 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std25, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std25, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std26 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std26, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std26, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std27 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std27, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std27, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std28 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std28, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std28, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std29 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std29, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std29, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std30 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std30, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std30, 7U);
 }
 
 static inline void
@@ -135,8 +135,8 @@ chacha20_core_128(
   uint32_t ctr
 )
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  uint32_t ctr_u32 = (uint32_t)4U * ctr;
+  memcpy(k, ctx, 16U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  uint32_t ctr_u32 = 4U * ctr;
   Lib_IntVector_Intrinsics_vec128 cv = Lib_IntVector_Intrinsics_vec128_load32(ctr_u32);
   k[12U] = Lib_IntVector_Intrinsics_vec128_add32(k[12U], cv);
   double_round_128(k);
@@ -150,9 +150,9 @@ chacha20_core_128(
   double_round_128(k);
   double_round_128(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = k;
     Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_add32(k[i], ctx[i]);
     os[i] = x;);
@@ -164,47 +164,42 @@ chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n,
 {
   uint32_t ctx1[16U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx1;
     uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx1 + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx1[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx1 + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = ctx;
     uint32_t x = ctx1[i];
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_load32(x);
     os[i] = x0;);
-  Lib_IntVector_Intrinsics_vec128
-  ctr1 =
-    Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)0U,
-      (uint32_t)1U,
-      (uint32_t)2U,
-      (uint32_t)3U);
+  Lib_IntVector_Intrinsics_vec128 ctr1 = Lib_IntVector_Intrinsics_vec128_load32s(0U, 1U, 2U, 3U);
   Lib_IntVector_Intrinsics_vec128 c12 = ctx[12U];
   ctx[12U] = Lib_IntVector_Intrinsics_vec128_add32(c12, ctr1);
 }
@@ -221,13 +216,13 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[16U] KRML_POST_ALIGN(16) = { 0U };
   chacha20_init_128(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)256U;
-  uint32_t nb = len / (uint32_t)256U;
-  uint32_t rem1 = len % (uint32_t)256U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 256U;
+  uint32_t nb = len / 256U;
+  uint32_t rem1 = len % 256U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)256U;
-    uint8_t *uu____1 = text + i * (uint32_t)256U;
+    uint8_t *uu____0 = out + i * 256U;
+    uint8_t *uu____1 = text + i * 256U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, i);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -359,19 +354,19 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * 16U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)256U;
+    uint8_t *uu____2 = out + nb * 256U;
     uint8_t plain[256U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)256U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 256U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -503,13 +498,13 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * 16U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -526,13 +521,13 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[16U] KRML_POST_ALIGN(16) = { 0U };
   chacha20_init_128(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)256U;
-  uint32_t nb = len / (uint32_t)256U;
-  uint32_t rem1 = len % (uint32_t)256U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 256U;
+  uint32_t nb = len / 256U;
+  uint32_t rem1 = len % 256U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)256U;
-    uint8_t *uu____1 = cipher + i * (uint32_t)256U;
+    uint8_t *uu____0 = out + i * 256U;
+    uint8_t *uu____1 = cipher + i * 256U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, i);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -664,19 +659,19 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * 16U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)256U;
+    uint8_t *uu____2 = out + nb * 256U;
     uint8_t plain[256U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)256U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 256U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -808,13 +803,13 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * 16U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
diff --git a/src/msvc/Hacl_Chacha20_Vec256.c b/src/msvc/Hacl_Chacha20_Vec256.c
index 620f5040..e61a7cfe 100644
--- a/src/msvc/Hacl_Chacha20_Vec256.c
+++ b/src/msvc/Hacl_Chacha20_Vec256.c
@@ -32,100 +32,100 @@ static inline void double_round_256(Lib_IntVector_Intrinsics_vec256 *st)
 {
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std0 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std0, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std0, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std1 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std1, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std1, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std2 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std2, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std2, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std3 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std3, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std3, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std4 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std4, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std4, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std5 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std5, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std5, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std6 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std6, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std6, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std7 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std7, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std7, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std8 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std8, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std8, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std9 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std9, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std9, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std10 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std10, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std10, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std11 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std11, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std11, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std12 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std12, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std12, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std13 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std13, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std13, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std14 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std14, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std14, 7U);
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std15 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std15, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std15, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std16 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std16, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std16, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std17 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std17, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std17, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std18 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std18, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std18, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std19 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std19, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std19, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std20 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std20, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std20, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std21 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std21, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std21, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std22 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std22, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std22, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std23 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std23, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std23, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std24 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std24, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std24, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std25 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std25, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std25, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std26 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std26, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std26, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std27 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std27, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std27, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std28 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std28, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std28, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std29 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std29, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std29, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std30 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std30, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std30, 7U);
 }
 
 static inline void
@@ -135,8 +135,8 @@ chacha20_core_256(
   uint32_t ctr
 )
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  uint32_t ctr_u32 = (uint32_t)8U * ctr;
+  memcpy(k, ctx, 16U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  uint32_t ctr_u32 = 8U * ctr;
   Lib_IntVector_Intrinsics_vec256 cv = Lib_IntVector_Intrinsics_vec256_load32(ctr_u32);
   k[12U] = Lib_IntVector_Intrinsics_vec256_add32(k[12U], cv);
   double_round_256(k);
@@ -150,9 +150,9 @@ chacha20_core_256(
   double_round_256(k);
   double_round_256(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = k;
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_add32(k[i], ctx[i]);
     os[i] = x;);
@@ -164,51 +164,43 @@ chacha20_init_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k, uint8_t *n,
 {
   uint32_t ctx1[16U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx1;
     uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx1 + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx1[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx1 + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = ctx;
     uint32_t x = ctx1[i];
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_load32(x);
     os[i] = x0;);
   Lib_IntVector_Intrinsics_vec256
-  ctr1 =
-    Lib_IntVector_Intrinsics_vec256_load32s((uint32_t)0U,
-      (uint32_t)1U,
-      (uint32_t)2U,
-      (uint32_t)3U,
-      (uint32_t)4U,
-      (uint32_t)5U,
-      (uint32_t)6U,
-      (uint32_t)7U);
+  ctr1 = Lib_IntVector_Intrinsics_vec256_load32s(0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U);
   Lib_IntVector_Intrinsics_vec256 c12 = ctx[12U];
   ctx[12U] = Lib_IntVector_Intrinsics_vec256_add32(c12, ctr1);
 }
@@ -225,13 +217,13 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[16U] KRML_POST_ALIGN(32) = { 0U };
   chacha20_init_256(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)512U;
-  uint32_t nb = len / (uint32_t)512U;
-  uint32_t rem1 = len % (uint32_t)512U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 512U;
+  uint32_t nb = len / 512U;
+  uint32_t rem1 = len % 512U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)512U;
-    uint8_t *uu____1 = text + i * (uint32_t)512U;
+    uint8_t *uu____0 = out + i * 512U;
+    uint8_t *uu____1 = text + i * 512U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, i);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -459,19 +451,19 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * 32U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)512U;
+    uint8_t *uu____2 = out + nb * 512U;
     uint8_t plain[512U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)512U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 512U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -699,13 +691,13 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * 32U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -722,13 +714,13 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[16U] KRML_POST_ALIGN(32) = { 0U };
   chacha20_init_256(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)512U;
-  uint32_t nb = len / (uint32_t)512U;
-  uint32_t rem1 = len % (uint32_t)512U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 512U;
+  uint32_t nb = len / 512U;
+  uint32_t rem1 = len % 512U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)512U;
-    uint8_t *uu____1 = cipher + i * (uint32_t)512U;
+    uint8_t *uu____0 = out + i * 512U;
+    uint8_t *uu____1 = cipher + i * 512U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, i);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -956,19 +948,19 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * 32U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)512U;
+    uint8_t *uu____2 = out + nb * 512U;
     uint8_t plain[512U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)512U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 512U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -1196,13 +1188,13 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * 32U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
diff --git a/src/msvc/Hacl_Chacha20_Vec32.c b/src/msvc/Hacl_Chacha20_Vec32.c
index 2bf4764c..0dce915c 100644
--- a/src/msvc/Hacl_Chacha20_Vec32.c
+++ b/src/msvc/Hacl_Chacha20_Vec32.c
@@ -31,106 +31,106 @@ static inline void double_round_32(uint32_t *st)
 {
   st[0U] = st[0U] + st[4U];
   uint32_t std = st[12U] ^ st[0U];
-  st[12U] = std << (uint32_t)16U | std >> (uint32_t)16U;
+  st[12U] = std << 16U | std >> 16U;
   st[8U] = st[8U] + st[12U];
   uint32_t std0 = st[4U] ^ st[8U];
-  st[4U] = std0 << (uint32_t)12U | std0 >> (uint32_t)20U;
+  st[4U] = std0 << 12U | std0 >> 20U;
   st[0U] = st[0U] + st[4U];
   uint32_t std1 = st[12U] ^ st[0U];
-  st[12U] = std1 << (uint32_t)8U | std1 >> (uint32_t)24U;
+  st[12U] = std1 << 8U | std1 >> 24U;
   st[8U] = st[8U] + st[12U];
   uint32_t std2 = st[4U] ^ st[8U];
-  st[4U] = std2 << (uint32_t)7U | std2 >> (uint32_t)25U;
+  st[4U] = std2 << 7U | std2 >> 25U;
   st[1U] = st[1U] + st[5U];
   uint32_t std3 = st[13U] ^ st[1U];
-  st[13U] = std3 << (uint32_t)16U | std3 >> (uint32_t)16U;
+  st[13U] = std3 << 16U | std3 >> 16U;
   st[9U] = st[9U] + st[13U];
   uint32_t std4 = st[5U] ^ st[9U];
-  st[5U] = std4 << (uint32_t)12U | std4 >> (uint32_t)20U;
+  st[5U] = std4 << 12U | std4 >> 20U;
   st[1U] = st[1U] + st[5U];
   uint32_t std5 = st[13U] ^ st[1U];
-  st[13U] = std5 << (uint32_t)8U | std5 >> (uint32_t)24U;
+  st[13U] = std5 << 8U | std5 >> 24U;
   st[9U] = st[9U] + st[13U];
   uint32_t std6 = st[5U] ^ st[9U];
-  st[5U] = std6 << (uint32_t)7U | std6 >> (uint32_t)25U;
+  st[5U] = std6 << 7U | std6 >> 25U;
   st[2U] = st[2U] + st[6U];
   uint32_t std7 = st[14U] ^ st[2U];
-  st[14U] = std7 << (uint32_t)16U | std7 >> (uint32_t)16U;
+  st[14U] = std7 << 16U | std7 >> 16U;
   st[10U] = st[10U] + st[14U];
   uint32_t std8 = st[6U] ^ st[10U];
-  st[6U] = std8 << (uint32_t)12U | std8 >> (uint32_t)20U;
+  st[6U] = std8 << 12U | std8 >> 20U;
   st[2U] = st[2U] + st[6U];
   uint32_t std9 = st[14U] ^ st[2U];
-  st[14U] = std9 << (uint32_t)8U | std9 >> (uint32_t)24U;
+  st[14U] = std9 << 8U | std9 >> 24U;
   st[10U] = st[10U] + st[14U];
   uint32_t std10 = st[6U] ^ st[10U];
-  st[6U] = std10 << (uint32_t)7U | std10 >> (uint32_t)25U;
+  st[6U] = std10 << 7U | std10 >> 25U;
   st[3U] = st[3U] + st[7U];
   uint32_t std11 = st[15U] ^ st[3U];
-  st[15U] = std11 << (uint32_t)16U | std11 >> (uint32_t)16U;
+  st[15U] = std11 << 16U | std11 >> 16U;
   st[11U] = st[11U] + st[15U];
   uint32_t std12 = st[7U] ^ st[11U];
-  st[7U] = std12 << (uint32_t)12U | std12 >> (uint32_t)20U;
+  st[7U] = std12 << 12U | std12 >> 20U;
   st[3U] = st[3U] + st[7U];
   uint32_t std13 = st[15U] ^ st[3U];
-  st[15U] = std13 << (uint32_t)8U | std13 >> (uint32_t)24U;
+  st[15U] = std13 << 8U | std13 >> 24U;
   st[11U] = st[11U] + st[15U];
   uint32_t std14 = st[7U] ^ st[11U];
-  st[7U] = std14 << (uint32_t)7U | std14 >> (uint32_t)25U;
+  st[7U] = std14 << 7U | std14 >> 25U;
   st[0U] = st[0U] + st[5U];
   uint32_t std15 = st[15U] ^ st[0U];
-  st[15U] = std15 << (uint32_t)16U | std15 >> (uint32_t)16U;
+  st[15U] = std15 << 16U | std15 >> 16U;
   st[10U] = st[10U] + st[15U];
   uint32_t std16 = st[5U] ^ st[10U];
-  st[5U] = std16 << (uint32_t)12U | std16 >> (uint32_t)20U;
+  st[5U] = std16 << 12U | std16 >> 20U;
   st[0U] = st[0U] + st[5U];
   uint32_t std17 = st[15U] ^ st[0U];
-  st[15U] = std17 << (uint32_t)8U | std17 >> (uint32_t)24U;
+  st[15U] = std17 << 8U | std17 >> 24U;
   st[10U] = st[10U] + st[15U];
   uint32_t std18 = st[5U] ^ st[10U];
-  st[5U] = std18 << (uint32_t)7U | std18 >> (uint32_t)25U;
+  st[5U] = std18 << 7U | std18 >> 25U;
   st[1U] = st[1U] + st[6U];
   uint32_t std19 = st[12U] ^ st[1U];
-  st[12U] = std19 << (uint32_t)16U | std19 >> (uint32_t)16U;
+  st[12U] = std19 << 16U | std19 >> 16U;
   st[11U] = st[11U] + st[12U];
   uint32_t std20 = st[6U] ^ st[11U];
-  st[6U] = std20 << (uint32_t)12U | std20 >> (uint32_t)20U;
+  st[6U] = std20 << 12U | std20 >> 20U;
   st[1U] = st[1U] + st[6U];
   uint32_t std21 = st[12U] ^ st[1U];
-  st[12U] = std21 << (uint32_t)8U | std21 >> (uint32_t)24U;
+  st[12U] = std21 << 8U | std21 >> 24U;
   st[11U] = st[11U] + st[12U];
   uint32_t std22 = st[6U] ^ st[11U];
-  st[6U] = std22 << (uint32_t)7U | std22 >> (uint32_t)25U;
+  st[6U] = std22 << 7U | std22 >> 25U;
   st[2U] = st[2U] + st[7U];
   uint32_t std23 = st[13U] ^ st[2U];
-  st[13U] = std23 << (uint32_t)16U | std23 >> (uint32_t)16U;
+  st[13U] = std23 << 16U | std23 >> 16U;
   st[8U] = st[8U] + st[13U];
   uint32_t std24 = st[7U] ^ st[8U];
-  st[7U] = std24 << (uint32_t)12U | std24 >> (uint32_t)20U;
+  st[7U] = std24 << 12U | std24 >> 20U;
   st[2U] = st[2U] + st[7U];
   uint32_t std25 = st[13U] ^ st[2U];
-  st[13U] = std25 << (uint32_t)8U | std25 >> (uint32_t)24U;
+  st[13U] = std25 << 8U | std25 >> 24U;
   st[8U] = st[8U] + st[13U];
   uint32_t std26 = st[7U] ^ st[8U];
-  st[7U] = std26 << (uint32_t)7U | std26 >> (uint32_t)25U;
+  st[7U] = std26 << 7U | std26 >> 25U;
   st[3U] = st[3U] + st[4U];
   uint32_t std27 = st[14U] ^ st[3U];
-  st[14U] = std27 << (uint32_t)16U | std27 >> (uint32_t)16U;
+  st[14U] = std27 << 16U | std27 >> 16U;
   st[9U] = st[9U] + st[14U];
   uint32_t std28 = st[4U] ^ st[9U];
-  st[4U] = std28 << (uint32_t)12U | std28 >> (uint32_t)20U;
+  st[4U] = std28 << 12U | std28 >> 20U;
   st[3U] = st[3U] + st[4U];
   uint32_t std29 = st[14U] ^ st[3U];
-  st[14U] = std29 << (uint32_t)8U | std29 >> (uint32_t)24U;
+  st[14U] = std29 << 8U | std29 >> 24U;
   st[9U] = st[9U] + st[14U];
   uint32_t std30 = st[4U] ^ st[9U];
-  st[4U] = std30 << (uint32_t)7U | std30 >> (uint32_t)25U;
+  st[4U] = std30 << 7U | std30 >> 25U;
 }
 
 static inline void chacha20_core_32(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t));
-  uint32_t ctr_u32 = (uint32_t)1U * ctr;
+  memcpy(k, ctx, 16U * sizeof (uint32_t));
+  uint32_t ctr_u32 = 1U * ctr;
   uint32_t cv = ctr_u32;
   k[12U] = k[12U] + cv;
   double_round_32(k);
@@ -144,9 +144,9 @@ static inline void chacha20_core_32(uint32_t *k, uint32_t *ctx, uint32_t ctr)
   double_round_32(k);
   double_round_32(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = k;
     uint32_t x = k[i] + ctx[i];
     os[i] = x;);
@@ -157,41 +157,41 @@ static inline void chacha20_init_32(uint32_t *ctx, uint8_t *k, uint8_t *n, uint3
 {
   uint32_t ctx1[16U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx1;
     uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx1 + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx1[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx1 + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = ctx;
     uint32_t x = ctx1[i];
     os[i] = x;);
-  uint32_t ctr1 = (uint32_t)0U;
+  uint32_t ctr1 = 0U;
   uint32_t c12 = ctx[12U];
   ctx[12U] = c12 + ctr1;
 }
@@ -208,39 +208,39 @@ Hacl_Chacha20_Vec32_chacha20_encrypt_32(
 {
   uint32_t ctx[16U] = { 0U };
   chacha20_init_32(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = text + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = text + i0 * 64U;
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, i0);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(uu____1 + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(uu____1 + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(uu____0 + i * (uint32_t)4U, y););
+      store32_le(uu____0 + i * 4U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, nb);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(plain + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(plain + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(plain + i * (uint32_t)4U, y););
+      store32_le(plain + i * 4U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -257,39 +257,39 @@ Hacl_Chacha20_Vec32_chacha20_decrypt_32(
 {
   uint32_t ctx[16U] = { 0U };
   chacha20_init_32(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = cipher + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = cipher + i0 * 64U;
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, i0);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(uu____1 + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(uu____1 + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(uu____0 + i * (uint32_t)4U, y););
+      store32_le(uu____0 + i * 4U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, nb);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(plain + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(plain + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(plain + i * (uint32_t)4U, y););
+      store32_le(plain + i * 4U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
diff --git a/src/msvc/Hacl_Curve25519_51.c b/src/msvc/Hacl_Curve25519_51.c
index 64c855cf..ca561e89 100644
--- a/src/msvc/Hacl_Curve25519_51.c
+++ b/src/msvc/Hacl_Curve25519_51.c
@@ -28,38 +28,38 @@
 #include "internal/Hacl_Krmllib.h"
 #include "internal/Hacl_Bignum25519_51.h"
 
-static const uint8_t g25519[32U] = { (uint8_t)9U };
+static const uint8_t g25519[32U] = { 9U };
 
 static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_uint128 *tmp2)
 {
   uint64_t *nq = p01_tmp1;
-  uint64_t *nq_p1 = p01_tmp1 + (uint32_t)10U;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
+  uint64_t *nq_p1 = p01_tmp1 + 10U;
+  uint64_t *tmp1 = p01_tmp1 + 20U;
   uint64_t *x1 = q;
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)5U;
-  uint64_t *z3 = nq_p1 + (uint32_t)5U;
+  uint64_t *z2 = nq + 5U;
+  uint64_t *z3 = nq_p1 + 5U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)5U;
+  uint64_t *b = tmp1 + 5U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)10U;
+  uint64_t *dc = tmp1 + 10U;
   Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
   Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
   uint64_t *x3 = nq_p1;
-  uint64_t *z31 = nq_p1 + (uint32_t)5U;
+  uint64_t *z31 = nq_p1 + 5U;
   uint64_t *d0 = dc;
-  uint64_t *c0 = dc + (uint32_t)5U;
+  uint64_t *c0 = dc + 5U;
   Hacl_Impl_Curve25519_Field51_fadd(c0, x3, z31);
   Hacl_Impl_Curve25519_Field51_fsub(d0, x3, z31);
   Hacl_Impl_Curve25519_Field51_fmul2(dc, dc, ab, tmp2);
   Hacl_Impl_Curve25519_Field51_fadd(x3, d0, c0);
   Hacl_Impl_Curve25519_Field51_fsub(z31, d0, c0);
   uint64_t *a1 = tmp1;
-  uint64_t *b1 = tmp1 + (uint32_t)5U;
-  uint64_t *d = tmp1 + (uint32_t)10U;
-  uint64_t *c = tmp1 + (uint32_t)15U;
+  uint64_t *b1 = tmp1 + 5U;
+  uint64_t *d = tmp1 + 10U;
+  uint64_t *c = tmp1 + 15U;
   uint64_t *ab1 = tmp1;
-  uint64_t *dc1 = tmp1 + (uint32_t)10U;
+  uint64_t *dc1 = tmp1 + 10U;
   Hacl_Impl_Curve25519_Field51_fsqr2(dc1, ab1, tmp2);
   Hacl_Impl_Curve25519_Field51_fsqr2(nq_p1, nq_p1, tmp2);
   a1[0U] = c[0U];
@@ -68,7 +68,7 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_
   a1[3U] = c[3U];
   a1[4U] = c[4U];
   Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
-  Hacl_Impl_Curve25519_Field51_fmul1(b1, c, (uint64_t)121665U);
+  Hacl_Impl_Curve25519_Field51_fmul1(b1, c, 121665ULL);
   Hacl_Impl_Curve25519_Field51_fadd(b1, b1, d);
   Hacl_Impl_Curve25519_Field51_fmul2(nq, dc1, ab1, tmp2);
   Hacl_Impl_Curve25519_Field51_fmul(z3, z3, x1, tmp2);
@@ -77,13 +77,13 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_
 static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tmp2)
 {
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)5U;
+  uint64_t *z2 = nq + 5U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)5U;
-  uint64_t *d = tmp1 + (uint32_t)10U;
-  uint64_t *c = tmp1 + (uint32_t)15U;
+  uint64_t *b = tmp1 + 5U;
+  uint64_t *d = tmp1 + 10U;
+  uint64_t *c = tmp1 + 15U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)10U;
+  uint64_t *dc = tmp1 + 10U;
   Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
   Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
   Hacl_Impl_Curve25519_Field51_fsqr2(dc, ab, tmp2);
@@ -93,7 +93,7 @@ static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tm
   a[3U] = c[3U];
   a[4U] = c[4U];
   Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
-  Hacl_Impl_Curve25519_Field51_fmul1(b, c, (uint64_t)121665U);
+  Hacl_Impl_Curve25519_Field51_fmul1(b, c, 121665ULL);
   Hacl_Impl_Curve25519_Field51_fadd(b, b, d);
   Hacl_Impl_Curve25519_Field51_fmul2(nq, dc, ab, tmp2);
 }
@@ -101,46 +101,41 @@ static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tm
 static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
 {
   FStar_UInt128_uint128 tmp2[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp2[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp2[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   uint64_t p01_tmp1_swap[41U] = { 0U };
   uint64_t *p0 = p01_tmp1_swap;
   uint64_t *p01 = p01_tmp1_swap;
   uint64_t *p03 = p01;
-  uint64_t *p11 = p01 + (uint32_t)10U;
-  memcpy(p11, init, (uint32_t)10U * sizeof (uint64_t));
+  uint64_t *p11 = p01 + 10U;
+  memcpy(p11, init, 10U * sizeof (uint64_t));
   uint64_t *x0 = p03;
-  uint64_t *z0 = p03 + (uint32_t)5U;
-  x0[0U] = (uint64_t)1U;
-  x0[1U] = (uint64_t)0U;
-  x0[2U] = (uint64_t)0U;
-  x0[3U] = (uint64_t)0U;
-  x0[4U] = (uint64_t)0U;
-  z0[0U] = (uint64_t)0U;
-  z0[1U] = (uint64_t)0U;
-  z0[2U] = (uint64_t)0U;
-  z0[3U] = (uint64_t)0U;
-  z0[4U] = (uint64_t)0U;
+  uint64_t *z0 = p03 + 5U;
+  x0[0U] = 1ULL;
+  x0[1U] = 0ULL;
+  x0[2U] = 0ULL;
+  x0[3U] = 0ULL;
+  x0[4U] = 0ULL;
+  z0[0U] = 0ULL;
+  z0[1U] = 0ULL;
+  z0[2U] = 0ULL;
+  z0[3U] = 0ULL;
+  z0[4U] = 0ULL;
   uint64_t *p01_tmp1 = p01_tmp1_swap;
   uint64_t *p01_tmp11 = p01_tmp1_swap;
   uint64_t *nq1 = p01_tmp1_swap;
-  uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)10U;
-  uint64_t *swap = p01_tmp1_swap + (uint32_t)40U;
-  Hacl_Impl_Curve25519_Field51_cswap2((uint64_t)1U, nq1, nq_p11);
+  uint64_t *nq_p11 = p01_tmp1_swap + 10U;
+  uint64_t *swap = p01_tmp1_swap + 40U;
+  Hacl_Impl_Curve25519_Field51_cswap2(1ULL, nq1, nq_p11);
   point_add_and_double(init, p01_tmp11, tmp2);
-  swap[0U] = (uint64_t)1U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i++)
+  swap[0U] = 1ULL;
+  for (uint32_t i = 0U; i < 251U; i++)
   {
     uint64_t *p01_tmp12 = p01_tmp1_swap;
-    uint64_t *swap1 = p01_tmp1_swap + (uint32_t)40U;
+    uint64_t *swap1 = p01_tmp1_swap + 40U;
     uint64_t *nq2 = p01_tmp12;
-    uint64_t *nq_p12 = p01_tmp12 + (uint32_t)10U;
-    uint64_t
-    bit =
-      (uint64_t)(key[((uint32_t)253U - i)
-      / (uint32_t)8U]
-      >> ((uint32_t)253U - i) % (uint32_t)8U
-      & (uint8_t)1U);
+    uint64_t *nq_p12 = p01_tmp12 + 10U;
+    uint64_t bit = (uint64_t)((uint32_t)key[(253U - i) / 8U] >> (253U - i) % 8U & 1U);
     uint64_t sw = swap1[0U] ^ bit;
     Hacl_Impl_Curve25519_Field51_cswap2(sw, nq2, nq_p12);
     point_add_and_double(init, p01_tmp12, tmp2);
@@ -149,11 +144,11 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
   uint64_t sw = swap[0U];
   Hacl_Impl_Curve25519_Field51_cswap2(sw, nq1, nq_p11);
   uint64_t *nq10 = p01_tmp1;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
+  uint64_t *tmp1 = p01_tmp1 + 20U;
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
-  memcpy(out, p0, (uint32_t)10U * sizeof (uint64_t));
+  memcpy(out, p0, 10U * sizeof (uint64_t));
 }
 
 void
@@ -165,7 +160,7 @@ Hacl_Curve25519_51_fsquare_times(
 )
 {
   Hacl_Impl_Curve25519_Field51_fsqr(o, inp, tmp);
-  for (uint32_t i = (uint32_t)0U; i < n - (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < n - 1U; i++)
   {
     Hacl_Impl_Curve25519_Field51_fsqr(o, o, tmp);
   }
@@ -175,60 +170,56 @@ void Hacl_Curve25519_51_finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tm
 {
   uint64_t t1[20U] = { 0U };
   uint64_t *a1 = t1;
-  uint64_t *b1 = t1 + (uint32_t)5U;
-  uint64_t *t010 = t1 + (uint32_t)15U;
+  uint64_t *b1 = t1 + 5U;
+  uint64_t *t010 = t1 + 15U;
   FStar_UInt128_uint128 *tmp10 = tmp;
-  Hacl_Curve25519_51_fsquare_times(a1, i, tmp10, (uint32_t)1U);
-  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, (uint32_t)2U);
+  Hacl_Curve25519_51_fsquare_times(a1, i, tmp10, 1U);
+  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, 2U);
   Hacl_Impl_Curve25519_Field51_fmul(b1, t010, i, tmp);
   Hacl_Impl_Curve25519_Field51_fmul(a1, b1, a1, tmp);
-  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, (uint32_t)1U);
+  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, 1U);
   Hacl_Impl_Curve25519_Field51_fmul(b1, t010, b1, tmp);
-  Hacl_Curve25519_51_fsquare_times(t010, b1, tmp10, (uint32_t)5U);
+  Hacl_Curve25519_51_fsquare_times(t010, b1, tmp10, 5U);
   Hacl_Impl_Curve25519_Field51_fmul(b1, t010, b1, tmp);
-  uint64_t *b10 = t1 + (uint32_t)5U;
-  uint64_t *c10 = t1 + (uint32_t)10U;
-  uint64_t *t011 = t1 + (uint32_t)15U;
+  uint64_t *b10 = t1 + 5U;
+  uint64_t *c10 = t1 + 10U;
+  uint64_t *t011 = t1 + 15U;
   FStar_UInt128_uint128 *tmp11 = tmp;
-  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, (uint32_t)10U);
+  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, 10U);
   Hacl_Impl_Curve25519_Field51_fmul(c10, t011, b10, tmp);
-  Hacl_Curve25519_51_fsquare_times(t011, c10, tmp11, (uint32_t)20U);
+  Hacl_Curve25519_51_fsquare_times(t011, c10, tmp11, 20U);
   Hacl_Impl_Curve25519_Field51_fmul(t011, t011, c10, tmp);
-  Hacl_Curve25519_51_fsquare_times(t011, t011, tmp11, (uint32_t)10U);
+  Hacl_Curve25519_51_fsquare_times(t011, t011, tmp11, 10U);
   Hacl_Impl_Curve25519_Field51_fmul(b10, t011, b10, tmp);
-  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, (uint32_t)50U);
+  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, 50U);
   Hacl_Impl_Curve25519_Field51_fmul(c10, t011, b10, tmp);
-  uint64_t *b11 = t1 + (uint32_t)5U;
-  uint64_t *c1 = t1 + (uint32_t)10U;
-  uint64_t *t01 = t1 + (uint32_t)15U;
+  uint64_t *b11 = t1 + 5U;
+  uint64_t *c1 = t1 + 10U;
+  uint64_t *t01 = t1 + 15U;
   FStar_UInt128_uint128 *tmp1 = tmp;
-  Hacl_Curve25519_51_fsquare_times(t01, c1, tmp1, (uint32_t)100U);
+  Hacl_Curve25519_51_fsquare_times(t01, c1, tmp1, 100U);
   Hacl_Impl_Curve25519_Field51_fmul(t01, t01, c1, tmp);
-  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, (uint32_t)50U);
+  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, 50U);
   Hacl_Impl_Curve25519_Field51_fmul(t01, t01, b11, tmp);
-  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, (uint32_t)5U);
+  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, 5U);
   uint64_t *a = t1;
-  uint64_t *t0 = t1 + (uint32_t)15U;
+  uint64_t *t0 = t1 + 15U;
   Hacl_Impl_Curve25519_Field51_fmul(o, t0, a, tmp);
 }
 
 static void encode_point(uint8_t *o, uint64_t *i)
 {
   uint64_t *x = i;
-  uint64_t *z = i + (uint32_t)5U;
+  uint64_t *z = i + 5U;
   uint64_t tmp[5U] = { 0U };
   uint64_t u64s[4U] = { 0U };
   FStar_UInt128_uint128 tmp_w[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp_w[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp_w[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_finv(tmp, z, tmp_w);
   Hacl_Impl_Curve25519_Field51_fmul(tmp, tmp, x, tmp_w);
   Hacl_Impl_Curve25519_Field51_store_felem(u64s, tmp);
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(o + i0 * (uint32_t)8U, u64s[i0]););
+  KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, store64_le(o + i0 * 8U, u64s[i0]););
 }
 
 /**
@@ -243,32 +234,32 @@ void Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
   uint64_t init[10U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = tmp;
-    uint8_t *bj = pub + i * (uint32_t)8U;
+    uint8_t *bj = pub + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t tmp3 = tmp[3U];
-  tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU;
+  tmp[3U] = tmp3 & 0x7fffffffffffffffULL;
   uint64_t *x = init;
-  uint64_t *z = init + (uint32_t)5U;
-  z[0U] = (uint64_t)1U;
-  z[1U] = (uint64_t)0U;
-  z[2U] = (uint64_t)0U;
-  z[3U] = (uint64_t)0U;
-  z[4U] = (uint64_t)0U;
-  uint64_t f0l = tmp[0U] & (uint64_t)0x7ffffffffffffU;
-  uint64_t f0h = tmp[0U] >> (uint32_t)51U;
-  uint64_t f1l = (tmp[1U] & (uint64_t)0x3fffffffffU) << (uint32_t)13U;
-  uint64_t f1h = tmp[1U] >> (uint32_t)38U;
-  uint64_t f2l = (tmp[2U] & (uint64_t)0x1ffffffU) << (uint32_t)26U;
-  uint64_t f2h = tmp[2U] >> (uint32_t)25U;
-  uint64_t f3l = (tmp[3U] & (uint64_t)0xfffU) << (uint32_t)39U;
-  uint64_t f3h = tmp[3U] >> (uint32_t)12U;
+  uint64_t *z = init + 5U;
+  z[0U] = 1ULL;
+  z[1U] = 0ULL;
+  z[2U] = 0ULL;
+  z[3U] = 0ULL;
+  z[4U] = 0ULL;
+  uint64_t f0l = tmp[0U] & 0x7ffffffffffffULL;
+  uint64_t f0h = tmp[0U] >> 51U;
+  uint64_t f1l = (tmp[1U] & 0x3fffffffffULL) << 13U;
+  uint64_t f1h = tmp[1U] >> 38U;
+  uint64_t f2l = (tmp[2U] & 0x1ffffffULL) << 26U;
+  uint64_t f2h = tmp[2U] >> 25U;
+  uint64_t f3l = (tmp[3U] & 0xfffULL) << 39U;
+  uint64_t f3h = tmp[3U] >> 12U;
   x[0U] = f0l;
   x[1U] = f0h | f1l;
   x[2U] = f1h | f2l;
@@ -289,7 +280,7 @@ This computes a scalar multiplication of the secret/private key with the curve's
 void Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv)
 {
   uint8_t basepoint[32U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = basepoint;
     uint8_t x = g25519[i];
@@ -309,14 +300,14 @@ bool Hacl_Curve25519_51_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
 {
   uint8_t zeros[32U] = { 0U };
   Hacl_Curve25519_51_scalarmult(out, priv, pub);
-  uint8_t res = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint8_t res = 255U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint8_t z = res;
-  bool r = z == (uint8_t)255U;
+  bool r = z == 255U;
   return !r;
 }
 
diff --git a/src/msvc/Hacl_Curve25519_64.c b/src/msvc/Hacl_Curve25519_64.c
index fb0974fe..edcab306 100644
--- a/src/msvc/Hacl_Curve25519_64.c
+++ b/src/msvc/Hacl_Curve25519_64.c
@@ -35,7 +35,7 @@ static inline void add_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   add_scalar(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(add_scalar_e(out, f1, f2));
+  add_scalar_e(out, f1, f2);
   #endif
 }
 
@@ -44,7 +44,7 @@ static inline void fadd0(uint64_t *out, uint64_t *f1, uint64_t *f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fadd(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(fadd_e(out, f1, f2));
+  fadd_e(out, f1, f2);
   #endif
 }
 
@@ -53,7 +53,7 @@ static inline void fsub0(uint64_t *out, uint64_t *f1, uint64_t *f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fsub(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(fsub_e(out, f1, f2));
+  fsub_e(out, f1, f2);
   #endif
 }
 
@@ -62,7 +62,7 @@ static inline void fmul0(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *tm
   #if HACL_CAN_COMPILE_INLINE_ASM
   fmul(out, f1, f2, tmp);
   #else
-  KRML_HOST_IGNORE(fmul_e(tmp, f1, out, f2));
+  fmul_e(tmp, f1, out, f2);
   #endif
 }
 
@@ -71,7 +71,7 @@ static inline void fmul20(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *t
   #if HACL_CAN_COMPILE_INLINE_ASM
   fmul2(out, f1, f2, tmp);
   #else
-  KRML_HOST_IGNORE(fmul2_e(tmp, f1, out, f2));
+  fmul2_e(tmp, f1, out, f2);
   #endif
 }
 
@@ -80,7 +80,7 @@ static inline void fmul_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fmul_scalar(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(fmul_scalar_e(out, f1, f2));
+  fmul_scalar_e(out, f1, f2);
   #endif
 }
 
@@ -89,7 +89,7 @@ static inline void fsqr0(uint64_t *out, uint64_t *f1, uint64_t *tmp)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fsqr(out, f1, tmp);
   #else
-  KRML_HOST_IGNORE(fsqr_e(tmp, f1, out));
+  fsqr_e(tmp, f1, out);
   #endif
 }
 
@@ -98,7 +98,7 @@ static inline void fsqr20(uint64_t *out, uint64_t *f, uint64_t *tmp)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fsqr2(out, f, tmp);
   #else
-  KRML_HOST_IGNORE(fsqr2_e(tmp, f, out));
+  fsqr2_e(tmp, f, out);
   #endif
 }
 
@@ -107,42 +107,42 @@ static inline void cswap20(uint64_t bit, uint64_t *p1, uint64_t *p2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   cswap2(bit, p1, p2);
   #else
-  KRML_HOST_IGNORE(cswap2_e(bit, p1, p2));
+  cswap2_e(bit, p1, p2);
   #endif
 }
 
-static const uint8_t g25519[32U] = { (uint8_t)9U };
+static const uint8_t g25519[32U] = { 9U };
 
 static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2)
 {
   uint64_t *nq = p01_tmp1;
-  uint64_t *nq_p1 = p01_tmp1 + (uint32_t)8U;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U;
+  uint64_t *nq_p1 = p01_tmp1 + 8U;
+  uint64_t *tmp1 = p01_tmp1 + 16U;
   uint64_t *x1 = q;
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)4U;
-  uint64_t *z3 = nq_p1 + (uint32_t)4U;
+  uint64_t *z2 = nq + 4U;
+  uint64_t *z3 = nq_p1 + 4U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)4U;
+  uint64_t *b = tmp1 + 4U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)8U;
+  uint64_t *dc = tmp1 + 8U;
   fadd0(a, x2, z2);
   fsub0(b, x2, z2);
   uint64_t *x3 = nq_p1;
-  uint64_t *z31 = nq_p1 + (uint32_t)4U;
+  uint64_t *z31 = nq_p1 + 4U;
   uint64_t *d0 = dc;
-  uint64_t *c0 = dc + (uint32_t)4U;
+  uint64_t *c0 = dc + 4U;
   fadd0(c0, x3, z31);
   fsub0(d0, x3, z31);
   fmul20(dc, dc, ab, tmp2);
   fadd0(x3, d0, c0);
   fsub0(z31, d0, c0);
   uint64_t *a1 = tmp1;
-  uint64_t *b1 = tmp1 + (uint32_t)4U;
-  uint64_t *d = tmp1 + (uint32_t)8U;
-  uint64_t *c = tmp1 + (uint32_t)12U;
+  uint64_t *b1 = tmp1 + 4U;
+  uint64_t *d = tmp1 + 8U;
+  uint64_t *c = tmp1 + 12U;
   uint64_t *ab1 = tmp1;
-  uint64_t *dc1 = tmp1 + (uint32_t)8U;
+  uint64_t *dc1 = tmp1 + 8U;
   fsqr20(dc1, ab1, tmp2);
   fsqr20(nq_p1, nq_p1, tmp2);
   a1[0U] = c[0U];
@@ -150,7 +150,7 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2
   a1[2U] = c[2U];
   a1[3U] = c[3U];
   fsub0(c, d, c);
-  fmul_scalar0(b1, c, (uint64_t)121665U);
+  fmul_scalar0(b1, c, 121665ULL);
   fadd0(b1, b1, d);
   fmul20(nq, dc1, ab1, tmp2);
   fmul0(z3, z3, x1, tmp2);
@@ -159,13 +159,13 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2
 static void point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2)
 {
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)4U;
+  uint64_t *z2 = nq + 4U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)4U;
-  uint64_t *d = tmp1 + (uint32_t)8U;
-  uint64_t *c = tmp1 + (uint32_t)12U;
+  uint64_t *b = tmp1 + 4U;
+  uint64_t *d = tmp1 + 8U;
+  uint64_t *c = tmp1 + 12U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)8U;
+  uint64_t *dc = tmp1 + 8U;
   fadd0(a, x2, z2);
   fsub0(b, x2, z2);
   fsqr20(dc, ab, tmp2);
@@ -174,7 +174,7 @@ static void point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2)
   a[2U] = c[2U];
   a[3U] = c[3U];
   fsub0(c, d, c);
-  fmul_scalar0(b, c, (uint64_t)121665U);
+  fmul_scalar0(b, c, 121665ULL);
   fadd0(b, b, d);
   fmul20(nq, dc, ab, tmp2);
 }
@@ -186,38 +186,33 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
   uint64_t *p0 = p01_tmp1_swap;
   uint64_t *p01 = p01_tmp1_swap;
   uint64_t *p03 = p01;
-  uint64_t *p11 = p01 + (uint32_t)8U;
-  memcpy(p11, init, (uint32_t)8U * sizeof (uint64_t));
+  uint64_t *p11 = p01 + 8U;
+  memcpy(p11, init, 8U * sizeof (uint64_t));
   uint64_t *x0 = p03;
-  uint64_t *z0 = p03 + (uint32_t)4U;
-  x0[0U] = (uint64_t)1U;
-  x0[1U] = (uint64_t)0U;
-  x0[2U] = (uint64_t)0U;
-  x0[3U] = (uint64_t)0U;
-  z0[0U] = (uint64_t)0U;
-  z0[1U] = (uint64_t)0U;
-  z0[2U] = (uint64_t)0U;
-  z0[3U] = (uint64_t)0U;
+  uint64_t *z0 = p03 + 4U;
+  x0[0U] = 1ULL;
+  x0[1U] = 0ULL;
+  x0[2U] = 0ULL;
+  x0[3U] = 0ULL;
+  z0[0U] = 0ULL;
+  z0[1U] = 0ULL;
+  z0[2U] = 0ULL;
+  z0[3U] = 0ULL;
   uint64_t *p01_tmp1 = p01_tmp1_swap;
   uint64_t *p01_tmp11 = p01_tmp1_swap;
   uint64_t *nq1 = p01_tmp1_swap;
-  uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)8U;
-  uint64_t *swap = p01_tmp1_swap + (uint32_t)32U;
-  cswap20((uint64_t)1U, nq1, nq_p11);
+  uint64_t *nq_p11 = p01_tmp1_swap + 8U;
+  uint64_t *swap = p01_tmp1_swap + 32U;
+  cswap20(1ULL, nq1, nq_p11);
   point_add_and_double(init, p01_tmp11, tmp2);
-  swap[0U] = (uint64_t)1U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i++)
+  swap[0U] = 1ULL;
+  for (uint32_t i = 0U; i < 251U; i++)
   {
     uint64_t *p01_tmp12 = p01_tmp1_swap;
-    uint64_t *swap1 = p01_tmp1_swap + (uint32_t)32U;
+    uint64_t *swap1 = p01_tmp1_swap + 32U;
     uint64_t *nq2 = p01_tmp12;
-    uint64_t *nq_p12 = p01_tmp12 + (uint32_t)8U;
-    uint64_t
-    bit =
-      (uint64_t)(key[((uint32_t)253U - i)
-      / (uint32_t)8U]
-      >> ((uint32_t)253U - i) % (uint32_t)8U
-      & (uint8_t)1U);
+    uint64_t *nq_p12 = p01_tmp12 + 8U;
+    uint64_t bit = (uint64_t)((uint32_t)key[(253U - i) / 8U] >> (253U - i) % 8U & 1U);
     uint64_t sw = swap1[0U] ^ bit;
     cswap20(sw, nq2, nq_p12);
     point_add_and_double(init, p01_tmp12, tmp2);
@@ -226,17 +221,17 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
   uint64_t sw = swap[0U];
   cswap20(sw, nq1, nq_p11);
   uint64_t *nq10 = p01_tmp1;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U;
+  uint64_t *tmp1 = p01_tmp1 + 16U;
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
-  memcpy(out, p0, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(out, p0, 8U * sizeof (uint64_t));
 }
 
 static void fsquare_times(uint64_t *o, uint64_t *inp, uint64_t *tmp, uint32_t n)
 {
   fsqr0(o, inp, tmp);
-  for (uint32_t i = (uint32_t)0U; i < n - (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < n - 1U; i++)
   {
     fsqr0(o, o, tmp);
   }
@@ -246,66 +241,66 @@ static void finv(uint64_t *o, uint64_t *i, uint64_t *tmp)
 {
   uint64_t t1[16U] = { 0U };
   uint64_t *a1 = t1;
-  uint64_t *b1 = t1 + (uint32_t)4U;
-  uint64_t *t010 = t1 + (uint32_t)12U;
+  uint64_t *b1 = t1 + 4U;
+  uint64_t *t010 = t1 + 12U;
   uint64_t *tmp10 = tmp;
-  fsquare_times(a1, i, tmp10, (uint32_t)1U);
-  fsquare_times(t010, a1, tmp10, (uint32_t)2U);
+  fsquare_times(a1, i, tmp10, 1U);
+  fsquare_times(t010, a1, tmp10, 2U);
   fmul0(b1, t010, i, tmp);
   fmul0(a1, b1, a1, tmp);
-  fsquare_times(t010, a1, tmp10, (uint32_t)1U);
+  fsquare_times(t010, a1, tmp10, 1U);
   fmul0(b1, t010, b1, tmp);
-  fsquare_times(t010, b1, tmp10, (uint32_t)5U);
+  fsquare_times(t010, b1, tmp10, 5U);
   fmul0(b1, t010, b1, tmp);
-  uint64_t *b10 = t1 + (uint32_t)4U;
-  uint64_t *c10 = t1 + (uint32_t)8U;
-  uint64_t *t011 = t1 + (uint32_t)12U;
+  uint64_t *b10 = t1 + 4U;
+  uint64_t *c10 = t1 + 8U;
+  uint64_t *t011 = t1 + 12U;
   uint64_t *tmp11 = tmp;
-  fsquare_times(t011, b10, tmp11, (uint32_t)10U);
+  fsquare_times(t011, b10, tmp11, 10U);
   fmul0(c10, t011, b10, tmp);
-  fsquare_times(t011, c10, tmp11, (uint32_t)20U);
+  fsquare_times(t011, c10, tmp11, 20U);
   fmul0(t011, t011, c10, tmp);
-  fsquare_times(t011, t011, tmp11, (uint32_t)10U);
+  fsquare_times(t011, t011, tmp11, 10U);
   fmul0(b10, t011, b10, tmp);
-  fsquare_times(t011, b10, tmp11, (uint32_t)50U);
+  fsquare_times(t011, b10, tmp11, 50U);
   fmul0(c10, t011, b10, tmp);
-  uint64_t *b11 = t1 + (uint32_t)4U;
-  uint64_t *c1 = t1 + (uint32_t)8U;
-  uint64_t *t01 = t1 + (uint32_t)12U;
+  uint64_t *b11 = t1 + 4U;
+  uint64_t *c1 = t1 + 8U;
+  uint64_t *t01 = t1 + 12U;
   uint64_t *tmp1 = tmp;
-  fsquare_times(t01, c1, tmp1, (uint32_t)100U);
+  fsquare_times(t01, c1, tmp1, 100U);
   fmul0(t01, t01, c1, tmp);
-  fsquare_times(t01, t01, tmp1, (uint32_t)50U);
+  fsquare_times(t01, t01, tmp1, 50U);
   fmul0(t01, t01, b11, tmp);
-  fsquare_times(t01, t01, tmp1, (uint32_t)5U);
+  fsquare_times(t01, t01, tmp1, 5U);
   uint64_t *a = t1;
-  uint64_t *t0 = t1 + (uint32_t)12U;
+  uint64_t *t0 = t1 + 12U;
   fmul0(o, t0, a, tmp);
 }
 
 static void store_felem(uint64_t *b, uint64_t *f)
 {
   uint64_t f30 = f[3U];
-  uint64_t top_bit0 = f30 >> (uint32_t)63U;
-  f[3U] = f30 & (uint64_t)0x7fffffffffffffffU;
-  add_scalar0(f, f, (uint64_t)19U * top_bit0);
+  uint64_t top_bit0 = f30 >> 63U;
+  f[3U] = f30 & 0x7fffffffffffffffULL;
+  add_scalar0(f, f, 19ULL * top_bit0);
   uint64_t f31 = f[3U];
-  uint64_t top_bit = f31 >> (uint32_t)63U;
-  f[3U] = f31 & (uint64_t)0x7fffffffffffffffU;
-  add_scalar0(f, f, (uint64_t)19U * top_bit);
+  uint64_t top_bit = f31 >> 63U;
+  f[3U] = f31 & 0x7fffffffffffffffULL;
+  add_scalar0(f, f, 19ULL * top_bit);
   uint64_t f0 = f[0U];
   uint64_t f1 = f[1U];
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
-  uint64_t m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0xffffffffffffffedU);
-  uint64_t m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0xffffffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0xffffffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7fffffffffffffffU);
+  uint64_t m0 = FStar_UInt64_gte_mask(f0, 0xffffffffffffffedULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(f1, 0xffffffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(f2, 0xffffffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(f3, 0x7fffffffffffffffULL);
   uint64_t mask = ((m0 & m1) & m2) & m3;
-  uint64_t f0_ = f0 - (mask & (uint64_t)0xffffffffffffffedU);
-  uint64_t f1_ = f1 - (mask & (uint64_t)0xffffffffffffffffU);
-  uint64_t f2_ = f2 - (mask & (uint64_t)0xffffffffffffffffU);
-  uint64_t f3_ = f3 - (mask & (uint64_t)0x7fffffffffffffffU);
+  uint64_t f0_ = f0 - (mask & 0xffffffffffffffedULL);
+  uint64_t f1_ = f1 - (mask & 0xffffffffffffffffULL);
+  uint64_t f2_ = f2 - (mask & 0xffffffffffffffffULL);
+  uint64_t f3_ = f3 - (mask & 0x7fffffffffffffffULL);
   uint64_t o0 = f0_;
   uint64_t o1 = f1_;
   uint64_t o2 = f2_;
@@ -319,18 +314,14 @@ static void store_felem(uint64_t *b, uint64_t *f)
 static void encode_point(uint8_t *o, uint64_t *i)
 {
   uint64_t *x = i;
-  uint64_t *z = i + (uint32_t)4U;
+  uint64_t *z = i + 4U;
   uint64_t tmp[4U] = { 0U };
   uint64_t u64s[4U] = { 0U };
   uint64_t tmp_w[16U] = { 0U };
   finv(tmp, z, tmp_w);
   fmul0(tmp, tmp, x, tmp_w);
   store_felem(u64s, tmp);
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(o + i0 * (uint32_t)8U, u64s[i0]););
+  KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, store64_le(o + i0 * 8U, u64s[i0]););
 }
 
 /**
@@ -345,23 +336,23 @@ void Hacl_Curve25519_64_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
   uint64_t init[8U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = tmp;
-    uint8_t *bj = pub + i * (uint32_t)8U;
+    uint8_t *bj = pub + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t tmp3 = tmp[3U];
-  tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU;
+  tmp[3U] = tmp3 & 0x7fffffffffffffffULL;
   uint64_t *x = init;
-  uint64_t *z = init + (uint32_t)4U;
-  z[0U] = (uint64_t)1U;
-  z[1U] = (uint64_t)0U;
-  z[2U] = (uint64_t)0U;
-  z[3U] = (uint64_t)0U;
+  uint64_t *z = init + 4U;
+  z[0U] = 1ULL;
+  z[1U] = 0ULL;
+  z[2U] = 0ULL;
+  z[3U] = 0ULL;
   x[0U] = tmp[0U];
   x[1U] = tmp[1U];
   x[2U] = tmp[2U];
@@ -381,7 +372,7 @@ This computes a scalar multiplication of the secret/private key with the curve's
 void Hacl_Curve25519_64_secret_to_public(uint8_t *pub, uint8_t *priv)
 {
   uint8_t basepoint[32U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = basepoint;
     uint8_t x = g25519[i];
@@ -401,14 +392,14 @@ bool Hacl_Curve25519_64_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
 {
   uint8_t zeros[32U] = { 0U };
   Hacl_Curve25519_64_scalarmult(out, priv, pub);
-  uint8_t res = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint8_t res = 255U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint8_t z = res;
-  bool r = z == (uint8_t)255U;
+  bool r = z == 255U;
   return !r;
 }
 
diff --git a/src/msvc/Hacl_EC_Ed25519.c b/src/msvc/Hacl_EC_Ed25519.c
index 46f2837b..6ab24a33 100644
--- a/src/msvc/Hacl_EC_Ed25519.c
+++ b/src/msvc/Hacl_EC_Ed25519.c
@@ -43,11 +43,11 @@ Write the additive identity in `f`.
 */
 void Hacl_EC_Ed25519_mk_felem_zero(uint64_t *b)
 {
-  b[0U] = (uint64_t)0U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 0ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
 }
 
 /**
@@ -57,11 +57,11 @@ Write the multiplicative identity in `f`.
 */
 void Hacl_EC_Ed25519_mk_felem_one(uint64_t *b)
 {
-  b[0U] = (uint64_t)1U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 1ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
 }
 
 /**
@@ -106,8 +106,8 @@ Write `a * b mod p` in `out`.
 void Hacl_EC_Ed25519_felem_mul(uint64_t *a, uint64_t *b, uint64_t *out)
 {
   FStar_UInt128_uint128 tmp[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fmul(out, a, b, tmp);
 }
 
@@ -123,8 +123,8 @@ Write `a * a mod p` in `out`.
 void Hacl_EC_Ed25519_felem_sqr(uint64_t *a, uint64_t *out)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fsqr(out, a, tmp);
 }
 
@@ -205,29 +205,29 @@ Write the base point (generator) in `p`.
 void Hacl_EC_Ed25519_mk_base_point(uint64_t *p)
 {
   uint64_t *gx = p;
-  uint64_t *gy = p + (uint32_t)5U;
-  uint64_t *gz = p + (uint32_t)10U;
-  uint64_t *gt = p + (uint32_t)15U;
-  gx[0U] = (uint64_t)0x00062d608f25d51aU;
-  gx[1U] = (uint64_t)0x000412a4b4f6592aU;
-  gx[2U] = (uint64_t)0x00075b7171a4b31dU;
-  gx[3U] = (uint64_t)0x0001ff60527118feU;
-  gx[4U] = (uint64_t)0x000216936d3cd6e5U;
-  gy[0U] = (uint64_t)0x0006666666666658U;
-  gy[1U] = (uint64_t)0x0004ccccccccccccU;
-  gy[2U] = (uint64_t)0x0001999999999999U;
-  gy[3U] = (uint64_t)0x0003333333333333U;
-  gy[4U] = (uint64_t)0x0006666666666666U;
-  gz[0U] = (uint64_t)1U;
-  gz[1U] = (uint64_t)0U;
-  gz[2U] = (uint64_t)0U;
-  gz[3U] = (uint64_t)0U;
-  gz[4U] = (uint64_t)0U;
-  gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
-  gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
-  gt[2U] = (uint64_t)0x0002af8df483c27eU;
-  gt[3U] = (uint64_t)0x000332b375274732U;
-  gt[4U] = (uint64_t)0x00067875f0fd78b7U;
+  uint64_t *gy = p + 5U;
+  uint64_t *gz = p + 10U;
+  uint64_t *gt = p + 15U;
+  gx[0U] = 0x00062d608f25d51aULL;
+  gx[1U] = 0x000412a4b4f6592aULL;
+  gx[2U] = 0x00075b7171a4b31dULL;
+  gx[3U] = 0x0001ff60527118feULL;
+  gx[4U] = 0x000216936d3cd6e5ULL;
+  gy[0U] = 0x0006666666666658ULL;
+  gy[1U] = 0x0004ccccccccccccULL;
+  gy[2U] = 0x0001999999999999ULL;
+  gy[3U] = 0x0003333333333333ULL;
+  gy[4U] = 0x0006666666666666ULL;
+  gz[0U] = 1ULL;
+  gz[1U] = 0ULL;
+  gz[2U] = 0ULL;
+  gz[3U] = 0ULL;
+  gz[4U] = 0ULL;
+  gt[0U] = 0x00068ab3a5b7dda3ULL;
+  gt[1U] = 0x00000eea2a5eadbbULL;
+  gt[2U] = 0x0002af8df483c27eULL;
+  gt[3U] = 0x000332b375274732ULL;
+  gt[4U] = 0x00067875f0fd78b7ULL;
 }
 
 /**
diff --git a/src/msvc/Hacl_EC_K256.c b/src/msvc/Hacl_EC_K256.c
index e48edb5b..581c223b 100644
--- a/src/msvc/Hacl_EC_K256.c
+++ b/src/msvc/Hacl_EC_K256.c
@@ -43,7 +43,7 @@ Write the additive identity in `f`.
 */
 void Hacl_EC_K256_mk_felem_zero(uint64_t *f)
 {
-  memset(f, 0U, (uint32_t)5U * sizeof (uint64_t));
+  memset(f, 0U, 5U * sizeof (uint64_t));
 }
 
 /**
@@ -53,8 +53,8 @@ Write the multiplicative identity in `f`.
 */
 void Hacl_EC_K256_mk_felem_one(uint64_t *f)
 {
-  memset(f, 0U, (uint32_t)5U * sizeof (uint64_t));
-  f[0U] = (uint64_t)1U;
+  memset(f, 0U, 5U * sizeof (uint64_t));
+  f[0U] = 1ULL;
 }
 
 /**
@@ -83,7 +83,7 @@ Write `a - b mod p` in `out`.
 */
 void Hacl_EC_K256_felem_sub(uint64_t *a, uint64_t *b, uint64_t *out)
 {
-  Hacl_K256_Field_fsub(out, a, b, (uint64_t)2U);
+  Hacl_K256_Field_fsub(out, a, b, 2ULL);
   Hacl_K256_Field_fnormalize_weak(out, out);
 }
 
@@ -189,20 +189,20 @@ Write the base point (generator) in `p`.
 void Hacl_EC_K256_mk_base_point(uint64_t *p)
 {
   uint64_t *gx = p;
-  uint64_t *gy = p + (uint32_t)5U;
-  uint64_t *gz = p + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = p + 5U;
+  uint64_t *gz = p + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
 }
 
 /**
@@ -264,11 +264,11 @@ void Hacl_EC_K256_point_mul(uint8_t *scalar, uint64_t *p, uint64_t *out)
 {
   uint64_t scalar_q[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = scalar_q;
-    uint64_t u = load64_be(scalar + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(scalar + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
   Hacl_Impl_K256_PointMul_point_mul(out, scalar_q, p);
@@ -307,20 +307,20 @@ void Hacl_EC_K256_point_load(uint8_t *b, uint64_t *out)
 {
   uint64_t p_aff[10U] = { 0U };
   uint64_t *px = p_aff;
-  uint64_t *py = p_aff + (uint32_t)5U;
+  uint64_t *py = p_aff + 5U;
   uint8_t *pxb = b;
-  uint8_t *pyb = b + (uint32_t)32U;
+  uint8_t *pyb = b + 32U;
   Hacl_K256_Field_load_felem(px, pxb);
   Hacl_K256_Field_load_felem(py, pyb);
   uint64_t *x = p_aff;
-  uint64_t *y = p_aff + (uint32_t)5U;
+  uint64_t *y = p_aff + 5U;
   uint64_t *x1 = out;
-  uint64_t *y1 = out + (uint32_t)5U;
-  uint64_t *z1 = out + (uint32_t)10U;
-  memcpy(x1, x, (uint32_t)5U * sizeof (uint64_t));
-  memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t));
-  memset(z1, 0U, (uint32_t)5U * sizeof (uint64_t));
-  z1[0U] = (uint64_t)1U;
+  uint64_t *y1 = out + 5U;
+  uint64_t *z1 = out + 10U;
+  memcpy(x1, x, 5U * sizeof (uint64_t));
+  memcpy(y1, y, 5U * sizeof (uint64_t));
+  memset(z1, 0U, 5U * sizeof (uint64_t));
+  z1[0U] = 1ULL;
 }
 
 /**
diff --git a/src/msvc/Hacl_Ed25519.c b/src/msvc/Hacl_Ed25519.c
index f9881e91..44dc6dba 100644
--- a/src/msvc/Hacl_Ed25519.c
+++ b/src/msvc/Hacl_Ed25519.c
@@ -49,24 +49,24 @@ void Hacl_Bignum25519_reduce_513(uint64_t *a)
   uint64_t f2 = a[2U];
   uint64_t f3 = a[3U];
   uint64_t f4 = a[4U];
-  uint64_t l_ = f0 + (uint64_t)0U;
-  uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = l_ >> (uint32_t)51U;
+  uint64_t l_ = f0 + 0ULL;
+  uint64_t tmp0 = l_ & 0x7ffffffffffffULL;
+  uint64_t c0 = l_ >> 51U;
   uint64_t l_0 = f1 + c0;
-  uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = l_0 >> (uint32_t)51U;
+  uint64_t tmp1 = l_0 & 0x7ffffffffffffULL;
+  uint64_t c1 = l_0 >> 51U;
   uint64_t l_1 = f2 + c1;
-  uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = l_1 >> (uint32_t)51U;
+  uint64_t tmp2 = l_1 & 0x7ffffffffffffULL;
+  uint64_t c2 = l_1 >> 51U;
   uint64_t l_2 = f3 + c2;
-  uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = l_2 >> (uint32_t)51U;
+  uint64_t tmp3 = l_2 & 0x7ffffffffffffULL;
+  uint64_t c3 = l_2 >> 51U;
   uint64_t l_3 = f4 + c3;
-  uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = l_3 >> (uint32_t)51U;
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = l_3 & 0x7ffffffffffffULL;
+  uint64_t c4 = l_3 >> 51U;
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   a[0U] = tmp0_;
   a[1U] = tmp1 + c5;
   a[2U] = tmp2;
@@ -77,8 +77,8 @@ void Hacl_Bignum25519_reduce_513(uint64_t *a)
 static inline void fmul0(uint64_t *output, uint64_t *input, uint64_t *input2)
 {
   FStar_UInt128_uint128 tmp[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fmul(output, input, input2, tmp);
 }
 
@@ -89,11 +89,11 @@ static inline void times_2(uint64_t *out, uint64_t *a)
   uint64_t a2 = a[2U];
   uint64_t a3 = a[3U];
   uint64_t a4 = a[4U];
-  uint64_t o0 = (uint64_t)2U * a0;
-  uint64_t o1 = (uint64_t)2U * a1;
-  uint64_t o2 = (uint64_t)2U * a2;
-  uint64_t o3 = (uint64_t)2U * a3;
-  uint64_t o4 = (uint64_t)2U * a4;
+  uint64_t o0 = 2ULL * a0;
+  uint64_t o1 = 2ULL * a1;
+  uint64_t o2 = 2ULL * a2;
+  uint64_t o3 = 2ULL * a3;
+  uint64_t o4 = 2ULL * a4;
   out[0U] = o0;
   out[1U] = o1;
   out[2U] = o2;
@@ -104,54 +104,54 @@ static inline void times_2(uint64_t *out, uint64_t *a)
 static inline void times_d(uint64_t *out, uint64_t *a)
 {
   uint64_t d[5U] = { 0U };
-  d[0U] = (uint64_t)0x00034dca135978a3U;
-  d[1U] = (uint64_t)0x0001a8283b156ebdU;
-  d[2U] = (uint64_t)0x0005e7a26001c029U;
-  d[3U] = (uint64_t)0x000739c663a03cbbU;
-  d[4U] = (uint64_t)0x00052036cee2b6ffU;
+  d[0U] = 0x00034dca135978a3ULL;
+  d[1U] = 0x0001a8283b156ebdULL;
+  d[2U] = 0x0005e7a26001c029ULL;
+  d[3U] = 0x000739c663a03cbbULL;
+  d[4U] = 0x00052036cee2b6ffULL;
   fmul0(out, d, a);
 }
 
 static inline void times_2d(uint64_t *out, uint64_t *a)
 {
   uint64_t d2[5U] = { 0U };
-  d2[0U] = (uint64_t)0x00069b9426b2f159U;
-  d2[1U] = (uint64_t)0x00035050762add7aU;
-  d2[2U] = (uint64_t)0x0003cf44c0038052U;
-  d2[3U] = (uint64_t)0x0006738cc7407977U;
-  d2[4U] = (uint64_t)0x0002406d9dc56dffU;
+  d2[0U] = 0x00069b9426b2f159ULL;
+  d2[1U] = 0x00035050762add7aULL;
+  d2[2U] = 0x0003cf44c0038052ULL;
+  d2[3U] = 0x0006738cc7407977ULL;
+  d2[4U] = 0x0002406d9dc56dffULL;
   fmul0(out, d2, a);
 }
 
 static inline void fsquare(uint64_t *out, uint64_t *a)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fsqr(out, a, tmp);
 }
 
 static inline void fsquare_times(uint64_t *output, uint64_t *input, uint32_t count)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_fsquare_times(output, input, tmp, count);
 }
 
 static inline void fsquare_times_inplace(uint64_t *output, uint32_t count)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_fsquare_times(output, output, tmp, count);
 }
 
 void Hacl_Bignum25519_inverse(uint64_t *out, uint64_t *a)
 {
   FStar_UInt128_uint128 tmp[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_finv(out, a, tmp);
 }
 
@@ -162,40 +162,40 @@ static inline void reduce(uint64_t *out)
   uint64_t o2 = out[2U];
   uint64_t o3 = out[3U];
   uint64_t o4 = out[4U];
-  uint64_t l_ = o0 + (uint64_t)0U;
-  uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = l_ >> (uint32_t)51U;
+  uint64_t l_ = o0 + 0ULL;
+  uint64_t tmp0 = l_ & 0x7ffffffffffffULL;
+  uint64_t c0 = l_ >> 51U;
   uint64_t l_0 = o1 + c0;
-  uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = l_0 >> (uint32_t)51U;
+  uint64_t tmp1 = l_0 & 0x7ffffffffffffULL;
+  uint64_t c1 = l_0 >> 51U;
   uint64_t l_1 = o2 + c1;
-  uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = l_1 >> (uint32_t)51U;
+  uint64_t tmp2 = l_1 & 0x7ffffffffffffULL;
+  uint64_t c2 = l_1 >> 51U;
   uint64_t l_2 = o3 + c2;
-  uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = l_2 >> (uint32_t)51U;
+  uint64_t tmp3 = l_2 & 0x7ffffffffffffULL;
+  uint64_t c3 = l_2 >> 51U;
   uint64_t l_3 = o4 + c3;
-  uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = l_3 >> (uint32_t)51U;
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = l_3 & 0x7ffffffffffffULL;
+  uint64_t c4 = l_3 >> 51U;
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t f0 = tmp0_;
   uint64_t f1 = tmp1 + c5;
   uint64_t f2 = tmp2;
   uint64_t f3 = tmp3;
   uint64_t f4 = tmp4;
-  uint64_t m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0x7ffffffffffedU);
-  uint64_t m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0x7ffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0x7ffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7ffffffffffffU);
-  uint64_t m4 = FStar_UInt64_eq_mask(f4, (uint64_t)0x7ffffffffffffU);
+  uint64_t m0 = FStar_UInt64_gte_mask(f0, 0x7ffffffffffedULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(f1, 0x7ffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(f2, 0x7ffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(f3, 0x7ffffffffffffULL);
+  uint64_t m4 = FStar_UInt64_eq_mask(f4, 0x7ffffffffffffULL);
   uint64_t mask = (((m0 & m1) & m2) & m3) & m4;
-  uint64_t f0_ = f0 - (mask & (uint64_t)0x7ffffffffffedU);
-  uint64_t f1_ = f1 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f2_ = f2 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f3_ = f3 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f4_ = f4 - (mask & (uint64_t)0x7ffffffffffffU);
+  uint64_t f0_ = f0 - (mask & 0x7ffffffffffedULL);
+  uint64_t f1_ = f1 - (mask & 0x7ffffffffffffULL);
+  uint64_t f2_ = f2 - (mask & 0x7ffffffffffffULL);
+  uint64_t f3_ = f3 - (mask & 0x7ffffffffffffULL);
+  uint64_t f4_ = f4 - (mask & 0x7ffffffffffffULL);
   uint64_t f01 = f0_;
   uint64_t f11 = f1_;
   uint64_t f21 = f2_;
@@ -212,45 +212,41 @@ void Hacl_Bignum25519_load_51(uint64_t *output, uint8_t *input)
 {
   uint64_t u64s[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = u64s;
-    uint8_t *bj = input + i * (uint32_t)8U;
+    uint8_t *bj = input + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t u64s3 = u64s[3U];
-  u64s[3U] = u64s3 & (uint64_t)0x7fffffffffffffffU;
-  output[0U] = u64s[0U] & (uint64_t)0x7ffffffffffffU;
-  output[1U] = u64s[0U] >> (uint32_t)51U | (u64s[1U] & (uint64_t)0x3fffffffffU) << (uint32_t)13U;
-  output[2U] = u64s[1U] >> (uint32_t)38U | (u64s[2U] & (uint64_t)0x1ffffffU) << (uint32_t)26U;
-  output[3U] = u64s[2U] >> (uint32_t)25U | (u64s[3U] & (uint64_t)0xfffU) << (uint32_t)39U;
-  output[4U] = u64s[3U] >> (uint32_t)12U;
+  u64s[3U] = u64s3 & 0x7fffffffffffffffULL;
+  output[0U] = u64s[0U] & 0x7ffffffffffffULL;
+  output[1U] = u64s[0U] >> 51U | (u64s[1U] & 0x3fffffffffULL) << 13U;
+  output[2U] = u64s[1U] >> 38U | (u64s[2U] & 0x1ffffffULL) << 26U;
+  output[3U] = u64s[2U] >> 25U | (u64s[3U] & 0xfffULL) << 39U;
+  output[4U] = u64s[3U] >> 12U;
 }
 
 void Hacl_Bignum25519_store_51(uint8_t *output, uint64_t *input)
 {
   uint64_t u64s[4U] = { 0U };
   Hacl_Impl_Curve25519_Field51_store_felem(u64s, input);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(output + i * (uint32_t)8U, u64s[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(output + i * 8U, u64s[i]););
 }
 
 void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
 {
   uint64_t tmp[20U] = { 0U };
   uint64_t *tmp1 = tmp;
-  uint64_t *tmp20 = tmp + (uint32_t)5U;
-  uint64_t *tmp30 = tmp + (uint32_t)10U;
-  uint64_t *tmp40 = tmp + (uint32_t)15U;
+  uint64_t *tmp20 = tmp + 5U;
+  uint64_t *tmp30 = tmp + 10U;
+  uint64_t *tmp40 = tmp + 15U;
   uint64_t *x10 = p;
-  uint64_t *y10 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y10 = p + 5U;
+  uint64_t *z1 = p + 10U;
   fsquare(tmp1, x10);
   fsquare(tmp20, y10);
   fsum(tmp30, tmp1, tmp20);
@@ -258,11 +254,11 @@ void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
   fsquare(tmp1, z1);
   times_2(tmp1, tmp1);
   uint64_t *tmp10 = tmp;
-  uint64_t *tmp2 = tmp + (uint32_t)5U;
-  uint64_t *tmp3 = tmp + (uint32_t)10U;
-  uint64_t *tmp4 = tmp + (uint32_t)15U;
+  uint64_t *tmp2 = tmp + 5U;
+  uint64_t *tmp3 = tmp + 10U;
+  uint64_t *tmp4 = tmp + 15U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
+  uint64_t *y1 = p + 5U;
   fsum(tmp2, x1, y1);
   fsquare(tmp2, tmp2);
   Hacl_Bignum25519_reduce_513(tmp3);
@@ -271,13 +267,13 @@ void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
   Hacl_Bignum25519_reduce_513(tmp4);
   fsum(tmp10, tmp10, tmp4);
   uint64_t *tmp_f = tmp;
-  uint64_t *tmp_e = tmp + (uint32_t)5U;
-  uint64_t *tmp_h = tmp + (uint32_t)10U;
-  uint64_t *tmp_g = tmp + (uint32_t)15U;
+  uint64_t *tmp_e = tmp + 5U;
+  uint64_t *tmp_h = tmp + 10U;
+  uint64_t *tmp_g = tmp + 15U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
-  uint64_t *t3 = out + (uint32_t)15U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
+  uint64_t *t3 = out + 15U;
   fmul0(x3, tmp_e, tmp_f);
   fmul0(y3, tmp_g, tmp_h);
   fmul0(t3, tmp_e, tmp_h);
@@ -288,13 +284,13 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
 {
   uint64_t tmp[30U] = { 0U };
   uint64_t *tmp1 = tmp;
-  uint64_t *tmp20 = tmp + (uint32_t)5U;
-  uint64_t *tmp30 = tmp + (uint32_t)10U;
-  uint64_t *tmp40 = tmp + (uint32_t)15U;
+  uint64_t *tmp20 = tmp + 5U;
+  uint64_t *tmp30 = tmp + 10U;
+  uint64_t *tmp40 = tmp + 15U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
+  uint64_t *y1 = p + 5U;
   uint64_t *x2 = q;
-  uint64_t *y2 = q + (uint32_t)5U;
+  uint64_t *y2 = q + 5U;
   fdifference(tmp1, y1, x1);
   fdifference(tmp20, y2, x2);
   fmul0(tmp30, tmp1, tmp20);
@@ -302,15 +298,15 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
   fsum(tmp20, y2, x2);
   fmul0(tmp40, tmp1, tmp20);
   uint64_t *tmp10 = tmp;
-  uint64_t *tmp2 = tmp + (uint32_t)5U;
-  uint64_t *tmp3 = tmp + (uint32_t)10U;
-  uint64_t *tmp4 = tmp + (uint32_t)15U;
-  uint64_t *tmp5 = tmp + (uint32_t)20U;
-  uint64_t *tmp6 = tmp + (uint32_t)25U;
-  uint64_t *z1 = p + (uint32_t)10U;
-  uint64_t *t1 = p + (uint32_t)15U;
-  uint64_t *z2 = q + (uint32_t)10U;
-  uint64_t *t2 = q + (uint32_t)15U;
+  uint64_t *tmp2 = tmp + 5U;
+  uint64_t *tmp3 = tmp + 10U;
+  uint64_t *tmp4 = tmp + 15U;
+  uint64_t *tmp5 = tmp + 20U;
+  uint64_t *tmp6 = tmp + 25U;
+  uint64_t *z1 = p + 10U;
+  uint64_t *t1 = p + 15U;
+  uint64_t *z2 = q + 10U;
+  uint64_t *t2 = q + 15U;
   times_2d(tmp10, t1);
   fmul0(tmp10, tmp10, t2);
   times_2(tmp2, z1);
@@ -320,13 +316,13 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
   fsum(tmp10, tmp2, tmp10);
   fsum(tmp2, tmp4, tmp3);
   uint64_t *tmp_g = tmp;
-  uint64_t *tmp_h = tmp + (uint32_t)5U;
-  uint64_t *tmp_e = tmp + (uint32_t)20U;
-  uint64_t *tmp_f = tmp + (uint32_t)25U;
+  uint64_t *tmp_h = tmp + 5U;
+  uint64_t *tmp_e = tmp + 20U;
+  uint64_t *tmp_f = tmp + 25U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
-  uint64_t *t3 = out + (uint32_t)15U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
+  uint64_t *t3 = out + 15U;
   fmul0(x3, tmp_e, tmp_f);
   fmul0(y3, tmp_g, tmp_h);
   fmul0(t3, tmp_e, tmp_h);
@@ -336,64 +332,64 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
 void Hacl_Impl_Ed25519_PointConstants_make_point_inf(uint64_t *b)
 {
   uint64_t *x = b;
-  uint64_t *y = b + (uint32_t)5U;
-  uint64_t *z = b + (uint32_t)10U;
-  uint64_t *t = b + (uint32_t)15U;
-  x[0U] = (uint64_t)0U;
-  x[1U] = (uint64_t)0U;
-  x[2U] = (uint64_t)0U;
-  x[3U] = (uint64_t)0U;
-  x[4U] = (uint64_t)0U;
-  y[0U] = (uint64_t)1U;
-  y[1U] = (uint64_t)0U;
-  y[2U] = (uint64_t)0U;
-  y[3U] = (uint64_t)0U;
-  y[4U] = (uint64_t)0U;
-  z[0U] = (uint64_t)1U;
-  z[1U] = (uint64_t)0U;
-  z[2U] = (uint64_t)0U;
-  z[3U] = (uint64_t)0U;
-  z[4U] = (uint64_t)0U;
-  t[0U] = (uint64_t)0U;
-  t[1U] = (uint64_t)0U;
-  t[2U] = (uint64_t)0U;
-  t[3U] = (uint64_t)0U;
-  t[4U] = (uint64_t)0U;
+  uint64_t *y = b + 5U;
+  uint64_t *z = b + 10U;
+  uint64_t *t = b + 15U;
+  x[0U] = 0ULL;
+  x[1U] = 0ULL;
+  x[2U] = 0ULL;
+  x[3U] = 0ULL;
+  x[4U] = 0ULL;
+  y[0U] = 1ULL;
+  y[1U] = 0ULL;
+  y[2U] = 0ULL;
+  y[3U] = 0ULL;
+  y[4U] = 0ULL;
+  z[0U] = 1ULL;
+  z[1U] = 0ULL;
+  z[2U] = 0ULL;
+  z[3U] = 0ULL;
+  z[4U] = 0ULL;
+  t[0U] = 0ULL;
+  t[1U] = 0ULL;
+  t[2U] = 0ULL;
+  t[3U] = 0ULL;
+  t[4U] = 0ULL;
 }
 
 static inline void pow2_252m2(uint64_t *out, uint64_t *z)
 {
   uint64_t buf[20U] = { 0U };
   uint64_t *a = buf;
-  uint64_t *t00 = buf + (uint32_t)5U;
-  uint64_t *b0 = buf + (uint32_t)10U;
-  uint64_t *c0 = buf + (uint32_t)15U;
-  fsquare_times(a, z, (uint32_t)1U);
-  fsquare_times(t00, a, (uint32_t)2U);
+  uint64_t *t00 = buf + 5U;
+  uint64_t *b0 = buf + 10U;
+  uint64_t *c0 = buf + 15U;
+  fsquare_times(a, z, 1U);
+  fsquare_times(t00, a, 2U);
   fmul0(b0, t00, z);
   fmul0(a, b0, a);
-  fsquare_times(t00, a, (uint32_t)1U);
+  fsquare_times(t00, a, 1U);
   fmul0(b0, t00, b0);
-  fsquare_times(t00, b0, (uint32_t)5U);
+  fsquare_times(t00, b0, 5U);
   fmul0(b0, t00, b0);
-  fsquare_times(t00, b0, (uint32_t)10U);
+  fsquare_times(t00, b0, 10U);
   fmul0(c0, t00, b0);
-  fsquare_times(t00, c0, (uint32_t)20U);
+  fsquare_times(t00, c0, 20U);
   fmul0(t00, t00, c0);
-  fsquare_times_inplace(t00, (uint32_t)10U);
+  fsquare_times_inplace(t00, 10U);
   fmul0(b0, t00, b0);
-  fsquare_times(t00, b0, (uint32_t)50U);
+  fsquare_times(t00, b0, 50U);
   uint64_t *a0 = buf;
-  uint64_t *t0 = buf + (uint32_t)5U;
-  uint64_t *b = buf + (uint32_t)10U;
-  uint64_t *c = buf + (uint32_t)15U;
-  fsquare_times(a0, z, (uint32_t)1U);
+  uint64_t *t0 = buf + 5U;
+  uint64_t *b = buf + 10U;
+  uint64_t *c = buf + 15U;
+  fsquare_times(a0, z, 1U);
   fmul0(c, t0, b);
-  fsquare_times(t0, c, (uint32_t)100U);
+  fsquare_times(t0, c, 100U);
   fmul0(t0, t0, c);
-  fsquare_times_inplace(t0, (uint32_t)50U);
+  fsquare_times_inplace(t0, 50U);
   fmul0(t0, t0, b);
-  fsquare_times_inplace(t0, (uint32_t)2U);
+  fsquare_times_inplace(t0, 2U);
   fmul0(out, t0, a0);
 }
 
@@ -404,23 +400,17 @@ static inline bool is_0(uint64_t *x)
   uint64_t x2 = x[2U];
   uint64_t x3 = x[3U];
   uint64_t x4 = x[4U];
-  return
-    x0
-    == (uint64_t)0U
-    && x1 == (uint64_t)0U
-    && x2 == (uint64_t)0U
-    && x3 == (uint64_t)0U
-    && x4 == (uint64_t)0U;
+  return x0 == 0ULL && x1 == 0ULL && x2 == 0ULL && x3 == 0ULL && x4 == 0ULL;
 }
 
 static inline void mul_modp_sqrt_m1(uint64_t *x)
 {
   uint64_t sqrt_m1[5U] = { 0U };
-  sqrt_m1[0U] = (uint64_t)0x00061b274a0ea0b0U;
-  sqrt_m1[1U] = (uint64_t)0x0000d5a5fc8f189dU;
-  sqrt_m1[2U] = (uint64_t)0x0007ef5e9cbd0c60U;
-  sqrt_m1[3U] = (uint64_t)0x00078595a6804c9eU;
-  sqrt_m1[4U] = (uint64_t)0x0002b8324804fc1dU;
+  sqrt_m1[0U] = 0x00061b274a0ea0b0ULL;
+  sqrt_m1[1U] = 0x0000d5a5fc8f189dULL;
+  sqrt_m1[2U] = 0x0007ef5e9cbd0c60ULL;
+  sqrt_m1[3U] = 0x00078595a6804c9eULL;
+  sqrt_m1[4U] = 0x0002b8324804fc1dULL;
   fmul0(x, x, sqrt_m1);
 }
 
@@ -436,11 +426,11 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
   bool
   b =
     x00
-    >= (uint64_t)0x7ffffffffffedU
-    && x1 == (uint64_t)0x7ffffffffffffU
-    && x21 == (uint64_t)0x7ffffffffffffU
-    && x30 == (uint64_t)0x7ffffffffffffU
-    && x4 == (uint64_t)0x7ffffffffffffU;
+    >= 0x7ffffffffffedULL
+    && x1 == 0x7ffffffffffffULL
+    && x21 == 0x7ffffffffffffULL
+    && x30 == 0x7ffffffffffffULL
+    && x4 == 0x7ffffffffffffULL;
   bool res;
   if (b)
   {
@@ -450,14 +440,14 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
   {
     uint64_t tmp1[20U] = { 0U };
     uint64_t *one = tmp1;
-    uint64_t *y2 = tmp1 + (uint32_t)5U;
-    uint64_t *dyyi = tmp1 + (uint32_t)10U;
-    uint64_t *dyy = tmp1 + (uint32_t)15U;
-    one[0U] = (uint64_t)1U;
-    one[1U] = (uint64_t)0U;
-    one[2U] = (uint64_t)0U;
-    one[3U] = (uint64_t)0U;
-    one[4U] = (uint64_t)0U;
+    uint64_t *y2 = tmp1 + 5U;
+    uint64_t *dyyi = tmp1 + 10U;
+    uint64_t *dyy = tmp1 + 15U;
+    one[0U] = 1ULL;
+    one[1U] = 0ULL;
+    one[2U] = 0ULL;
+    one[3U] = 0ULL;
+    one[4U] = 0ULL;
     fsquare(y2, y);
     times_d(dyy, y2);
     fsum(dyy, dyy, one);
@@ -470,37 +460,37 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
     uint8_t z;
     if (x2_is_0)
     {
-      if (sign == (uint64_t)0U)
+      if (sign == 0ULL)
       {
-        x[0U] = (uint64_t)0U;
-        x[1U] = (uint64_t)0U;
-        x[2U] = (uint64_t)0U;
-        x[3U] = (uint64_t)0U;
-        x[4U] = (uint64_t)0U;
-        z = (uint8_t)1U;
+        x[0U] = 0ULL;
+        x[1U] = 0ULL;
+        x[2U] = 0ULL;
+        x[3U] = 0ULL;
+        x[4U] = 0ULL;
+        z = 1U;
       }
       else
       {
-        z = (uint8_t)0U;
+        z = 0U;
       }
     }
     else
     {
-      z = (uint8_t)2U;
+      z = 2U;
     }
-    if (z == (uint8_t)0U)
+    if (z == 0U)
     {
       res = false;
     }
-    else if (z == (uint8_t)1U)
+    else if (z == 1U)
     {
       res = true;
     }
     else
     {
       uint64_t *x210 = tmp;
-      uint64_t *x31 = tmp + (uint32_t)5U;
-      uint64_t *t00 = tmp + (uint32_t)10U;
+      uint64_t *x31 = tmp + 5U;
+      uint64_t *t00 = tmp + 10U;
       pow2_252m2(x31, x210);
       fsquare(t00, x31);
       fdifference(t00, t00, x210);
@@ -512,8 +502,8 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
         mul_modp_sqrt_m1(x31);
       }
       uint64_t *x211 = tmp;
-      uint64_t *x3 = tmp + (uint32_t)5U;
-      uint64_t *t01 = tmp + (uint32_t)10U;
+      uint64_t *x3 = tmp + 5U;
+      uint64_t *t01 = tmp + 10U;
       fsquare(t01, x3);
       fdifference(t01, t01, x211);
       Hacl_Bignum25519_reduce_513(t01);
@@ -525,23 +515,23 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
       }
       else
       {
-        uint64_t *x32 = tmp + (uint32_t)5U;
-        uint64_t *t0 = tmp + (uint32_t)10U;
+        uint64_t *x32 = tmp + 5U;
+        uint64_t *t0 = tmp + 10U;
         reduce(x32);
         uint64_t x0 = x32[0U];
-        uint64_t x01 = x0 & (uint64_t)1U;
+        uint64_t x01 = x0 & 1ULL;
         if (!(x01 == sign))
         {
-          t0[0U] = (uint64_t)0U;
-          t0[1U] = (uint64_t)0U;
-          t0[2U] = (uint64_t)0U;
-          t0[3U] = (uint64_t)0U;
-          t0[4U] = (uint64_t)0U;
+          t0[0U] = 0ULL;
+          t0[1U] = 0ULL;
+          t0[2U] = 0ULL;
+          t0[3U] = 0ULL;
+          t0[4U] = 0ULL;
           fdifference(x32, t0, x32);
           Hacl_Bignum25519_reduce_513(x32);
           reduce(x32);
         }
-        memcpy(x, x32, (uint32_t)5U * sizeof (uint64_t));
+        memcpy(x, x32, 5U * sizeof (uint64_t));
         res = true;
       }
     }
@@ -554,9 +544,9 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *
 {
   uint64_t tmp[10U] = { 0U };
   uint64_t *y = tmp;
-  uint64_t *x = tmp + (uint32_t)5U;
+  uint64_t *x = tmp + 5U;
   uint8_t s31 = s[31U];
-  uint8_t z = s31 >> (uint32_t)7U;
+  uint8_t z = (uint32_t)s31 >> 7U;
   uint64_t sign = (uint64_t)z;
   Hacl_Bignum25519_load_51(y, s);
   bool z0 = recover_x(x, y, sign);
@@ -568,16 +558,16 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *
   else
   {
     uint64_t *outx = out;
-    uint64_t *outy = out + (uint32_t)5U;
-    uint64_t *outz = out + (uint32_t)10U;
-    uint64_t *outt = out + (uint32_t)15U;
-    memcpy(outx, x, (uint32_t)5U * sizeof (uint64_t));
-    memcpy(outy, y, (uint32_t)5U * sizeof (uint64_t));
-    outz[0U] = (uint64_t)1U;
-    outz[1U] = (uint64_t)0U;
-    outz[2U] = (uint64_t)0U;
-    outz[3U] = (uint64_t)0U;
-    outz[4U] = (uint64_t)0U;
+    uint64_t *outy = out + 5U;
+    uint64_t *outz = out + 10U;
+    uint64_t *outt = out + 15U;
+    memcpy(outx, x, 5U * sizeof (uint64_t));
+    memcpy(outy, y, 5U * sizeof (uint64_t));
+    outz[0U] = 1ULL;
+    outz[1U] = 0ULL;
+    outz[2U] = 0ULL;
+    outz[3U] = 0ULL;
+    outz[4U] = 0ULL;
     fmul0(outt, x, y);
     res = true;
   }
@@ -588,25 +578,25 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *
 void Hacl_Impl_Ed25519_PointCompress_point_compress(uint8_t *z, uint64_t *p)
 {
   uint64_t tmp[15U] = { 0U };
-  uint64_t *x = tmp + (uint32_t)5U;
-  uint64_t *out = tmp + (uint32_t)10U;
+  uint64_t *x = tmp + 5U;
+  uint64_t *out = tmp + 10U;
   uint64_t *zinv1 = tmp;
-  uint64_t *x1 = tmp + (uint32_t)5U;
-  uint64_t *out1 = tmp + (uint32_t)10U;
+  uint64_t *x1 = tmp + 5U;
+  uint64_t *out1 = tmp + 10U;
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
   Hacl_Bignum25519_inverse(zinv1, pz);
   fmul0(x1, px, zinv1);
   reduce(x1);
   fmul0(out1, py, zinv1);
   Hacl_Bignum25519_reduce_513(out1);
   uint64_t x0 = x[0U];
-  uint64_t b = x0 & (uint64_t)1U;
+  uint64_t b = x0 & 1ULL;
   Hacl_Bignum25519_store_51(z, out);
   uint8_t xbyte = (uint8_t)b;
   uint8_t o31 = z[31U];
-  z[31U] = o31 + (xbyte << (uint32_t)7U);
+  z[31U] = (uint32_t)o31 + ((uint32_t)xbyte << 7U);
 }
 
 static inline void barrett_reduction(uint64_t *z, uint64_t *t)
@@ -621,40 +611,40 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   uint64_t t7 = t[7U];
   uint64_t t8 = t[8U];
   uint64_t t9 = t[9U];
-  uint64_t m00 = (uint64_t)0x12631a5cf5d3edU;
-  uint64_t m10 = (uint64_t)0xf9dea2f79cd658U;
-  uint64_t m20 = (uint64_t)0x000000000014deU;
-  uint64_t m30 = (uint64_t)0x00000000000000U;
-  uint64_t m40 = (uint64_t)0x00000010000000U;
+  uint64_t m00 = 0x12631a5cf5d3edULL;
+  uint64_t m10 = 0xf9dea2f79cd658ULL;
+  uint64_t m20 = 0x000000000014deULL;
+  uint64_t m30 = 0x00000000000000ULL;
+  uint64_t m40 = 0x00000010000000ULL;
   uint64_t m0 = m00;
   uint64_t m1 = m10;
   uint64_t m2 = m20;
   uint64_t m3 = m30;
   uint64_t m4 = m40;
-  uint64_t m010 = (uint64_t)0x9ce5a30a2c131bU;
-  uint64_t m110 = (uint64_t)0x215d086329a7edU;
-  uint64_t m210 = (uint64_t)0xffffffffeb2106U;
-  uint64_t m310 = (uint64_t)0xffffffffffffffU;
-  uint64_t m410 = (uint64_t)0x00000fffffffffU;
+  uint64_t m010 = 0x9ce5a30a2c131bULL;
+  uint64_t m110 = 0x215d086329a7edULL;
+  uint64_t m210 = 0xffffffffeb2106ULL;
+  uint64_t m310 = 0xffffffffffffffULL;
+  uint64_t m410 = 0x00000fffffffffULL;
   uint64_t mu0 = m010;
   uint64_t mu1 = m110;
   uint64_t mu2 = m210;
   uint64_t mu3 = m310;
   uint64_t mu4 = m410;
-  uint64_t y_ = (t5 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_ = t4 >> (uint32_t)24U;
+  uint64_t y_ = (t5 & 0xffffffULL) << 32U;
+  uint64_t x_ = t4 >> 24U;
   uint64_t z00 = x_ | y_;
-  uint64_t y_0 = (t6 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_0 = t5 >> (uint32_t)24U;
+  uint64_t y_0 = (t6 & 0xffffffULL) << 32U;
+  uint64_t x_0 = t5 >> 24U;
   uint64_t z10 = x_0 | y_0;
-  uint64_t y_1 = (t7 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_1 = t6 >> (uint32_t)24U;
+  uint64_t y_1 = (t7 & 0xffffffULL) << 32U;
+  uint64_t x_1 = t6 >> 24U;
   uint64_t z20 = x_1 | y_1;
-  uint64_t y_2 = (t8 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_2 = t7 >> (uint32_t)24U;
+  uint64_t y_2 = (t8 & 0xffffffULL) << 32U;
+  uint64_t x_2 = t7 >> 24U;
   uint64_t z30 = x_2 | y_2;
-  uint64_t y_3 = (t9 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_3 = t8 >> (uint32_t)24U;
+  uint64_t y_3 = (t9 & 0xffffffULL) << 32U;
+  uint64_t x_3 = t8 >> 24U;
   uint64_t z40 = x_3 | y_3;
   uint64_t q0 = z00;
   uint64_t q1 = z10;
@@ -707,55 +697,37 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   FStar_UInt128_uint128 z6 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy24, xy33), xy42);
   FStar_UInt128_uint128 z7 = FStar_UInt128_add_mod(xy34, xy43);
   FStar_UInt128_uint128 z8 = xy44;
-  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z01, (uint32_t)56U);
+  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z01, 56U);
   FStar_UInt128_uint128 c00 = carry0;
-  FStar_UInt128_uint128
-  carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z11, c00), (uint32_t)56U);
+  FStar_UInt128_uint128 carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z11, c00), 56U);
   FStar_UInt128_uint128 c10 = carry1;
-  FStar_UInt128_uint128
-  carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z21, c10), (uint32_t)56U);
+  FStar_UInt128_uint128 carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z21, c10), 56U);
   FStar_UInt128_uint128 c20 = carry2;
-  FStar_UInt128_uint128
-  carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z31, c20), (uint32_t)56U);
+  FStar_UInt128_uint128 carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z31, c20), 56U);
   FStar_UInt128_uint128 c30 = carry3;
-  FStar_UInt128_uint128
-  carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z41, c30), (uint32_t)56U);
+  FStar_UInt128_uint128 carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z41, c30), 56U);
   uint64_t
-  t100 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z41, c30))
-    & (uint64_t)0xffffffffffffffU;
+  t100 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z41, c30)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c40 = carry4;
   uint64_t t410 = t100;
-  FStar_UInt128_uint128
-  carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z5, c40), (uint32_t)56U);
+  FStar_UInt128_uint128 carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z5, c40), 56U);
   uint64_t
-  t101 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z5, c40))
-    & (uint64_t)0xffffffffffffffU;
+  t101 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z5, c40)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c5 = carry5;
   uint64_t t51 = t101;
-  FStar_UInt128_uint128
-  carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z6, c5), (uint32_t)56U);
+  FStar_UInt128_uint128 carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z6, c5), 56U);
   uint64_t
-  t102 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z6, c5))
-    & (uint64_t)0xffffffffffffffU;
+  t102 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z6, c5)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c6 = carry6;
   uint64_t t61 = t102;
-  FStar_UInt128_uint128
-  carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z7, c6), (uint32_t)56U);
+  FStar_UInt128_uint128 carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z7, c6), 56U);
   uint64_t
-  t103 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z7, c6))
-    & (uint64_t)0xffffffffffffffU;
+  t103 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z7, c6)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c7 = carry7;
   uint64_t t71 = t103;
-  FStar_UInt128_uint128
-  carry8 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z8, c7), (uint32_t)56U);
+  FStar_UInt128_uint128 carry8 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z8, c7), 56U);
   uint64_t
-  t104 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z8, c7))
-    & (uint64_t)0xffffffffffffffU;
+  t104 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z8, c7)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c8 = carry8;
   uint64_t t81 = t104;
   uint64_t t91 = FStar_UInt128_uint128_to_uint64(c8);
@@ -765,20 +737,20 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   uint64_t qmu7_ = t71;
   uint64_t qmu8_ = t81;
   uint64_t qmu9_ = t91;
-  uint64_t y_4 = (qmu5_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_4 = qmu4_ >> (uint32_t)40U;
+  uint64_t y_4 = (qmu5_ & 0xffffffffffULL) << 16U;
+  uint64_t x_4 = qmu4_ >> 40U;
   uint64_t z02 = x_4 | y_4;
-  uint64_t y_5 = (qmu6_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_5 = qmu5_ >> (uint32_t)40U;
+  uint64_t y_5 = (qmu6_ & 0xffffffffffULL) << 16U;
+  uint64_t x_5 = qmu5_ >> 40U;
   uint64_t z12 = x_5 | y_5;
-  uint64_t y_6 = (qmu7_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_6 = qmu6_ >> (uint32_t)40U;
+  uint64_t y_6 = (qmu7_ & 0xffffffffffULL) << 16U;
+  uint64_t x_6 = qmu6_ >> 40U;
   uint64_t z22 = x_6 | y_6;
-  uint64_t y_7 = (qmu8_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_7 = qmu7_ >> (uint32_t)40U;
+  uint64_t y_7 = (qmu8_ & 0xffffffffffULL) << 16U;
+  uint64_t x_7 = qmu7_ >> 40U;
   uint64_t z32 = x_7 | y_7;
-  uint64_t y_8 = (qmu9_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_8 = qmu8_ >> (uint32_t)40U;
+  uint64_t y_8 = (qmu9_ & 0xffffffffffULL) << 16U;
+  uint64_t x_8 = qmu8_ >> 40U;
   uint64_t z42 = x_8 | y_8;
   uint64_t qdiv0 = z02;
   uint64_t qdiv1 = z12;
@@ -789,7 +761,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   uint64_t r1 = t1;
   uint64_t r2 = t2;
   uint64_t r3 = t3;
-  uint64_t r4 = t4 & (uint64_t)0xffffffffffU;
+  uint64_t r4 = t4 & 0xffffffffffULL;
   FStar_UInt128_uint128 xy00 = FStar_UInt128_mul_wide(qdiv0, m0);
   FStar_UInt128_uint128 xy01 = FStar_UInt128_mul_wide(qdiv0, m1);
   FStar_UInt128_uint128 xy02 = FStar_UInt128_mul_wide(qdiv0, m2);
@@ -805,18 +777,18 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   FStar_UInt128_uint128 xy30 = FStar_UInt128_mul_wide(qdiv3, m0);
   FStar_UInt128_uint128 xy31 = FStar_UInt128_mul_wide(qdiv3, m1);
   FStar_UInt128_uint128 xy40 = FStar_UInt128_mul_wide(qdiv4, m0);
-  FStar_UInt128_uint128 carry9 = FStar_UInt128_shift_right(xy00, (uint32_t)56U);
-  uint64_t t105 = FStar_UInt128_uint128_to_uint64(xy00) & (uint64_t)0xffffffffffffffU;
+  FStar_UInt128_uint128 carry9 = FStar_UInt128_shift_right(xy00, 56U);
+  uint64_t t105 = FStar_UInt128_uint128_to_uint64(xy00) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c0 = carry9;
   uint64_t t010 = t105;
   FStar_UInt128_uint128
   carry10 =
     FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0),
-      (uint32_t)56U);
+      56U);
   uint64_t
   t106 =
     FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0))
-    & (uint64_t)0xffffffffffffffU;
+    & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c11 = carry10;
   uint64_t t110 = t106;
   FStar_UInt128_uint128
@@ -825,14 +797,14 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy11),
           xy20),
         c11),
-      (uint32_t)56U);
+      56U);
   uint64_t
   t107 =
     FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02,
             xy11),
           xy20),
         c11))
-    & (uint64_t)0xffffffffffffffU;
+    & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c21 = carry11;
   uint64_t t210 = t107;
   FStar_UInt128_uint128
@@ -842,7 +814,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy21),
           xy30),
         c21),
-      (uint32_t)56U);
+      56U);
   uint64_t
   t108 =
     FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03,
@@ -850,7 +822,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy21),
           xy30),
         c21))
-    & (uint64_t)0xffffffffffffffU;
+    & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c31 = carry;
   uint64_t t310 = t108;
   uint64_t
@@ -861,67 +833,67 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy31),
           xy40),
         c31))
-    & (uint64_t)0xffffffffffU;
+    & 0xffffffffffULL;
   uint64_t qmul0 = t010;
   uint64_t qmul1 = t110;
   uint64_t qmul2 = t210;
   uint64_t qmul3 = t310;
   uint64_t qmul4 = t411;
-  uint64_t b5 = (r0 - qmul0) >> (uint32_t)63U;
-  uint64_t t109 = (b5 << (uint32_t)56U) + r0 - qmul0;
+  uint64_t b5 = (r0 - qmul0) >> 63U;
+  uint64_t t109 = (b5 << 56U) + r0 - qmul0;
   uint64_t c1 = b5;
   uint64_t t011 = t109;
-  uint64_t b6 = (r1 - (qmul1 + c1)) >> (uint32_t)63U;
-  uint64_t t1010 = (b6 << (uint32_t)56U) + r1 - (qmul1 + c1);
+  uint64_t b6 = (r1 - (qmul1 + c1)) >> 63U;
+  uint64_t t1010 = (b6 << 56U) + r1 - (qmul1 + c1);
   uint64_t c2 = b6;
   uint64_t t111 = t1010;
-  uint64_t b7 = (r2 - (qmul2 + c2)) >> (uint32_t)63U;
-  uint64_t t1011 = (b7 << (uint32_t)56U) + r2 - (qmul2 + c2);
+  uint64_t b7 = (r2 - (qmul2 + c2)) >> 63U;
+  uint64_t t1011 = (b7 << 56U) + r2 - (qmul2 + c2);
   uint64_t c3 = b7;
   uint64_t t211 = t1011;
-  uint64_t b8 = (r3 - (qmul3 + c3)) >> (uint32_t)63U;
-  uint64_t t1012 = (b8 << (uint32_t)56U) + r3 - (qmul3 + c3);
+  uint64_t b8 = (r3 - (qmul3 + c3)) >> 63U;
+  uint64_t t1012 = (b8 << 56U) + r3 - (qmul3 + c3);
   uint64_t c4 = b8;
   uint64_t t311 = t1012;
-  uint64_t b9 = (r4 - (qmul4 + c4)) >> (uint32_t)63U;
-  uint64_t t1013 = (b9 << (uint32_t)40U) + r4 - (qmul4 + c4);
+  uint64_t b9 = (r4 - (qmul4 + c4)) >> 63U;
+  uint64_t t1013 = (b9 << 40U) + r4 - (qmul4 + c4);
   uint64_t t412 = t1013;
   uint64_t s0 = t011;
   uint64_t s1 = t111;
   uint64_t s2 = t211;
   uint64_t s3 = t311;
   uint64_t s4 = t412;
-  uint64_t m01 = (uint64_t)0x12631a5cf5d3edU;
-  uint64_t m11 = (uint64_t)0xf9dea2f79cd658U;
-  uint64_t m21 = (uint64_t)0x000000000014deU;
-  uint64_t m31 = (uint64_t)0x00000000000000U;
-  uint64_t m41 = (uint64_t)0x00000010000000U;
+  uint64_t m01 = 0x12631a5cf5d3edULL;
+  uint64_t m11 = 0xf9dea2f79cd658ULL;
+  uint64_t m21 = 0x000000000014deULL;
+  uint64_t m31 = 0x00000000000000ULL;
+  uint64_t m41 = 0x00000010000000ULL;
   uint64_t y0 = m01;
   uint64_t y1 = m11;
   uint64_t y2 = m21;
   uint64_t y3 = m31;
   uint64_t y4 = m41;
-  uint64_t b10 = (s0 - y0) >> (uint32_t)63U;
-  uint64_t t1014 = (b10 << (uint32_t)56U) + s0 - y0;
+  uint64_t b10 = (s0 - y0) >> 63U;
+  uint64_t t1014 = (b10 << 56U) + s0 - y0;
   uint64_t b0 = b10;
   uint64_t t01 = t1014;
-  uint64_t b11 = (s1 - (y1 + b0)) >> (uint32_t)63U;
-  uint64_t t1015 = (b11 << (uint32_t)56U) + s1 - (y1 + b0);
+  uint64_t b11 = (s1 - (y1 + b0)) >> 63U;
+  uint64_t t1015 = (b11 << 56U) + s1 - (y1 + b0);
   uint64_t b1 = b11;
   uint64_t t11 = t1015;
-  uint64_t b12 = (s2 - (y2 + b1)) >> (uint32_t)63U;
-  uint64_t t1016 = (b12 << (uint32_t)56U) + s2 - (y2 + b1);
+  uint64_t b12 = (s2 - (y2 + b1)) >> 63U;
+  uint64_t t1016 = (b12 << 56U) + s2 - (y2 + b1);
   uint64_t b2 = b12;
   uint64_t t21 = t1016;
-  uint64_t b13 = (s3 - (y3 + b2)) >> (uint32_t)63U;
-  uint64_t t1017 = (b13 << (uint32_t)56U) + s3 - (y3 + b2);
+  uint64_t b13 = (s3 - (y3 + b2)) >> 63U;
+  uint64_t t1017 = (b13 << 56U) + s3 - (y3 + b2);
   uint64_t b3 = b13;
   uint64_t t31 = t1017;
-  uint64_t b = (s4 - (y4 + b3)) >> (uint32_t)63U;
-  uint64_t t10 = (b << (uint32_t)56U) + s4 - (y4 + b3);
+  uint64_t b = (s4 - (y4 + b3)) >> 63U;
+  uint64_t t10 = (b << 56U) + s4 - (y4 + b3);
   uint64_t b4 = b;
   uint64_t t41 = t10;
-  uint64_t mask = b4 - (uint64_t)1U;
+  uint64_t mask = b4 - 1ULL;
   uint64_t z03 = s0 ^ (mask & (s0 ^ t01));
   uint64_t z13 = s1 ^ (mask & (s1 ^ t11));
   uint64_t z23 = s2 ^ (mask & (s2 ^ t21));
@@ -1008,72 +980,48 @@ static inline void mul_modq(uint64_t *out, uint64_t *x, uint64_t *y)
   FStar_UInt128_uint128 z60 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy24, xy33), xy42);
   FStar_UInt128_uint128 z70 = FStar_UInt128_add_mod(xy34, xy43);
   FStar_UInt128_uint128 z80 = xy44;
-  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z00, (uint32_t)56U);
-  uint64_t t10 = FStar_UInt128_uint128_to_uint64(z00) & (uint64_t)0xffffffffffffffU;
+  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z00, 56U);
+  uint64_t t10 = FStar_UInt128_uint128_to_uint64(z00) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c0 = carry0;
   uint64_t t0 = t10;
-  FStar_UInt128_uint128
-  carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z10, c0), (uint32_t)56U);
+  FStar_UInt128_uint128 carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z10, c0), 56U);
   uint64_t
-  t11 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z10, c0))
-    & (uint64_t)0xffffffffffffffU;
+  t11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z10, c0)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c1 = carry1;
   uint64_t t1 = t11;
-  FStar_UInt128_uint128
-  carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z20, c1), (uint32_t)56U);
+  FStar_UInt128_uint128 carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z20, c1), 56U);
   uint64_t
-  t12 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z20, c1))
-    & (uint64_t)0xffffffffffffffU;
+  t12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z20, c1)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c2 = carry2;
   uint64_t t2 = t12;
-  FStar_UInt128_uint128
-  carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z30, c2), (uint32_t)56U);
+  FStar_UInt128_uint128 carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z30, c2), 56U);
   uint64_t
-  t13 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z30, c2))
-    & (uint64_t)0xffffffffffffffU;
+  t13 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z30, c2)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c3 = carry3;
   uint64_t t3 = t13;
-  FStar_UInt128_uint128
-  carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z40, c3), (uint32_t)56U);
+  FStar_UInt128_uint128 carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z40, c3), 56U);
   uint64_t
-  t14 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z40, c3))
-    & (uint64_t)0xffffffffffffffU;
+  t14 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z40, c3)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c4 = carry4;
   uint64_t t4 = t14;
-  FStar_UInt128_uint128
-  carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z50, c4), (uint32_t)56U);
+  FStar_UInt128_uint128 carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z50, c4), 56U);
   uint64_t
-  t15 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z50, c4))
-    & (uint64_t)0xffffffffffffffU;
+  t15 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z50, c4)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c5 = carry5;
   uint64_t t5 = t15;
-  FStar_UInt128_uint128
-  carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z60, c5), (uint32_t)56U);
+  FStar_UInt128_uint128 carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z60, c5), 56U);
   uint64_t
-  t16 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z60, c5))
-    & (uint64_t)0xffffffffffffffU;
+  t16 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z60, c5)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c6 = carry6;
   uint64_t t6 = t16;
-  FStar_UInt128_uint128
-  carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z70, c6), (uint32_t)56U);
+  FStar_UInt128_uint128 carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z70, c6), 56U);
   uint64_t
-  t17 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z70, c6))
-    & (uint64_t)0xffffffffffffffU;
+  t17 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z70, c6)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c7 = carry7;
   uint64_t t7 = t17;
-  FStar_UInt128_uint128
-  carry = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z80, c7), (uint32_t)56U);
+  FStar_UInt128_uint128 carry = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z80, c7), 56U);
   uint64_t
-  t =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z80, c7))
-    & (uint64_t)0xffffffffffffffU;
+  t = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z80, c7)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c8 = carry;
   uint64_t t8 = t;
   uint64_t t9 = FStar_UInt128_uint128_to_uint64(c8);
@@ -1112,54 +1060,54 @@ static inline void add_modq(uint64_t *out, uint64_t *x, uint64_t *y)
   uint64_t y2 = y[2U];
   uint64_t y3 = y[3U];
   uint64_t y4 = y[4U];
-  uint64_t carry0 = (x0 + y0) >> (uint32_t)56U;
-  uint64_t t0 = (x0 + y0) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry0 = (x0 + y0) >> 56U;
+  uint64_t t0 = (x0 + y0) & 0xffffffffffffffULL;
   uint64_t t00 = t0;
   uint64_t c0 = carry0;
-  uint64_t carry1 = (x1 + y1 + c0) >> (uint32_t)56U;
-  uint64_t t1 = (x1 + y1 + c0) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry1 = (x1 + y1 + c0) >> 56U;
+  uint64_t t1 = (x1 + y1 + c0) & 0xffffffffffffffULL;
   uint64_t t10 = t1;
   uint64_t c1 = carry1;
-  uint64_t carry2 = (x2 + y2 + c1) >> (uint32_t)56U;
-  uint64_t t2 = (x2 + y2 + c1) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry2 = (x2 + y2 + c1) >> 56U;
+  uint64_t t2 = (x2 + y2 + c1) & 0xffffffffffffffULL;
   uint64_t t20 = t2;
   uint64_t c2 = carry2;
-  uint64_t carry = (x3 + y3 + c2) >> (uint32_t)56U;
-  uint64_t t3 = (x3 + y3 + c2) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry = (x3 + y3 + c2) >> 56U;
+  uint64_t t3 = (x3 + y3 + c2) & 0xffffffffffffffULL;
   uint64_t t30 = t3;
   uint64_t c3 = carry;
   uint64_t t4 = x4 + y4 + c3;
-  uint64_t m0 = (uint64_t)0x12631a5cf5d3edU;
-  uint64_t m1 = (uint64_t)0xf9dea2f79cd658U;
-  uint64_t m2 = (uint64_t)0x000000000014deU;
-  uint64_t m3 = (uint64_t)0x00000000000000U;
-  uint64_t m4 = (uint64_t)0x00000010000000U;
+  uint64_t m0 = 0x12631a5cf5d3edULL;
+  uint64_t m1 = 0xf9dea2f79cd658ULL;
+  uint64_t m2 = 0x000000000014deULL;
+  uint64_t m3 = 0x00000000000000ULL;
+  uint64_t m4 = 0x00000010000000ULL;
   uint64_t y01 = m0;
   uint64_t y11 = m1;
   uint64_t y21 = m2;
   uint64_t y31 = m3;
   uint64_t y41 = m4;
-  uint64_t b5 = (t00 - y01) >> (uint32_t)63U;
-  uint64_t t5 = (b5 << (uint32_t)56U) + t00 - y01;
+  uint64_t b5 = (t00 - y01) >> 63U;
+  uint64_t t5 = (b5 << 56U) + t00 - y01;
   uint64_t b0 = b5;
   uint64_t t01 = t5;
-  uint64_t b6 = (t10 - (y11 + b0)) >> (uint32_t)63U;
-  uint64_t t6 = (b6 << (uint32_t)56U) + t10 - (y11 + b0);
+  uint64_t b6 = (t10 - (y11 + b0)) >> 63U;
+  uint64_t t6 = (b6 << 56U) + t10 - (y11 + b0);
   uint64_t b1 = b6;
   uint64_t t11 = t6;
-  uint64_t b7 = (t20 - (y21 + b1)) >> (uint32_t)63U;
-  uint64_t t7 = (b7 << (uint32_t)56U) + t20 - (y21 + b1);
+  uint64_t b7 = (t20 - (y21 + b1)) >> 63U;
+  uint64_t t7 = (b7 << 56U) + t20 - (y21 + b1);
   uint64_t b2 = b7;
   uint64_t t21 = t7;
-  uint64_t b8 = (t30 - (y31 + b2)) >> (uint32_t)63U;
-  uint64_t t8 = (b8 << (uint32_t)56U) + t30 - (y31 + b2);
+  uint64_t b8 = (t30 - (y31 + b2)) >> 63U;
+  uint64_t t8 = (b8 << 56U) + t30 - (y31 + b2);
   uint64_t b3 = b8;
   uint64_t t31 = t8;
-  uint64_t b = (t4 - (y41 + b3)) >> (uint32_t)63U;
-  uint64_t t = (b << (uint32_t)56U) + t4 - (y41 + b3);
+  uint64_t b = (t4 - (y41 + b3)) >> 63U;
+  uint64_t t = (b << 56U) + t4 - (y41 + b3);
   uint64_t b4 = b;
   uint64_t t41 = t;
-  uint64_t mask = b4 - (uint64_t)1U;
+  uint64_t mask = b4 - 1ULL;
   uint64_t z00 = t00 ^ (mask & (t00 ^ t01));
   uint64_t z10 = t10 ^ (mask & (t10 ^ t11));
   uint64_t z20 = t20 ^ (mask & (t20 ^ t21));
@@ -1194,35 +1142,35 @@ static inline bool gte_q(uint64_t *s)
   uint64_t s2 = s[2U];
   uint64_t s3 = s[3U];
   uint64_t s4 = s[4U];
-  if (s4 > (uint64_t)0x00000010000000U)
+  if (s4 > 0x00000010000000ULL)
   {
     return true;
   }
-  if (s4 < (uint64_t)0x00000010000000U)
+  if (s4 < 0x00000010000000ULL)
   {
     return false;
   }
-  if (s3 > (uint64_t)0x00000000000000U)
+  if (s3 > 0x00000000000000ULL)
   {
     return true;
   }
-  if (s2 > (uint64_t)0x000000000014deU)
+  if (s2 > 0x000000000014deULL)
   {
     return true;
   }
-  if (s2 < (uint64_t)0x000000000014deU)
+  if (s2 < 0x000000000014deULL)
   {
     return false;
   }
-  if (s1 > (uint64_t)0xf9dea2f79cd658U)
+  if (s1 > 0xf9dea2f79cd658ULL)
   {
     return true;
   }
-  if (s1 < (uint64_t)0xf9dea2f79cd658U)
+  if (s1 < 0xf9dea2f79cd658ULL)
   {
     return false;
   }
-  if (s0 >= (uint64_t)0x12631a5cf5d3edU)
+  if (s0 >= 0x12631a5cf5d3edULL)
   {
     return true;
   }
@@ -1248,19 +1196,19 @@ bool Hacl_Impl_Ed25519_PointEqual_point_equal(uint64_t *p, uint64_t *q)
 {
   uint64_t tmp[20U] = { 0U };
   uint64_t *pxqz = tmp;
-  uint64_t *qxpz = tmp + (uint32_t)5U;
-  fmul0(pxqz, p, q + (uint32_t)10U);
+  uint64_t *qxpz = tmp + 5U;
+  fmul0(pxqz, p, q + 10U);
   reduce(pxqz);
-  fmul0(qxpz, q, p + (uint32_t)10U);
+  fmul0(qxpz, q, p + 10U);
   reduce(qxpz);
   bool b = eq(pxqz, qxpz);
   if (b)
   {
-    uint64_t *pyqz = tmp + (uint32_t)10U;
-    uint64_t *qypz = tmp + (uint32_t)15U;
-    fmul0(pyqz, p + (uint32_t)5U, q + (uint32_t)10U);
+    uint64_t *pyqz = tmp + 10U;
+    uint64_t *qypz = tmp + 15U;
+    fmul0(pyqz, p + 5U, q + 10U);
     reduce(pyqz);
-    fmul0(qypz, q + (uint32_t)5U, p + (uint32_t)10U);
+    fmul0(qypz, q + 5U, p + 10U);
     reduce(qypz);
     return eq(pyqz, qypz);
   }
@@ -1270,23 +1218,23 @@ bool Hacl_Impl_Ed25519_PointEqual_point_equal(uint64_t *p, uint64_t *q)
 void Hacl_Impl_Ed25519_PointNegate_point_negate(uint64_t *p, uint64_t *out)
 {
   uint64_t zero[5U] = { 0U };
-  zero[0U] = (uint64_t)0U;
-  zero[1U] = (uint64_t)0U;
-  zero[2U] = (uint64_t)0U;
-  zero[3U] = (uint64_t)0U;
-  zero[4U] = (uint64_t)0U;
+  zero[0U] = 0ULL;
+  zero[1U] = 0ULL;
+  zero[2U] = 0ULL;
+  zero[3U] = 0ULL;
+  zero[4U] = 0ULL;
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)5U;
-  uint64_t *z = p + (uint32_t)10U;
-  uint64_t *t = p + (uint32_t)15U;
+  uint64_t *y = p + 5U;
+  uint64_t *z = p + 10U;
+  uint64_t *t = p + 15U;
   uint64_t *x1 = out;
-  uint64_t *y1 = out + (uint32_t)5U;
-  uint64_t *z1 = out + (uint32_t)10U;
-  uint64_t *t1 = out + (uint32_t)15U;
+  uint64_t *y1 = out + 5U;
+  uint64_t *z1 = out + 10U;
+  uint64_t *t1 = out + 15U;
   fdifference(x1, zero, x);
   Hacl_Bignum25519_reduce_513(x1);
-  memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t));
-  memcpy(z1, z, (uint32_t)5U * sizeof (uint64_t));
+  memcpy(y1, y, 5U * sizeof (uint64_t));
+  memcpy(z1, z, 5U * sizeof (uint64_t));
   fdifference(t1, zero, t);
   Hacl_Bignum25519_reduce_513(t1);
 }
@@ -1295,11 +1243,11 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t
 {
   uint64_t bscalar[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar;
-    uint8_t *bj = scalar + i * (uint32_t)8U;
+    uint8_t *bj = scalar + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -1307,42 +1255,34 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t
   uint64_t table[320U] = { 0U };
   uint64_t tmp[20U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)20U;
+  uint64_t *t1 = table + 20U;
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(t0);
-  memcpy(t1, q, (uint32_t)20U * sizeof (uint64_t));
+  memcpy(t1, q, 20U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)20U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 20U;
     Hacl_Impl_Ed25519_PointDouble_point_double(tmp, t11);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U,
-      tmp,
-      (uint32_t)20U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U;
+    memcpy(table + (2U * i + 2U) * 20U, tmp, 20U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 20U;
     Hacl_Impl_Ed25519_PointAdd_point_add(tmp, q, t2);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U,
-      tmp,
-      (uint32_t)20U * sizeof (uint64_t)););
+    memcpy(table + (2U * i + 3U) * 20U, tmp, 20U * sizeof (uint64_t)););
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(out);
   uint64_t tmp0[20U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)20U * sizeof (uint64_t));
+    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
+    uint32_t k = 256U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 20U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)20U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)20U; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 20U;
+      for (uint32_t i = 0U; i < 20U; i++)
       {
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -1354,14 +1294,14 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t
 
 static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
 {
-  memcpy(tmp, (uint64_t *)table, (uint32_t)20U * sizeof (uint64_t));
+  memcpy(tmp, (uint64_t *)table, 20U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i0,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
-    const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)20U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)20U; i++)
+    0U,
+    15U,
+    1U,
+    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + 1U));
+    const uint64_t *res_j = table + (i0 + 1U) * 20U;
+    for (uint32_t i = 0U; i < 20U; i++)
     {
       uint64_t *os = tmp;
       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
@@ -1373,107 +1313,97 @@ static inline void point_mul_g(uint64_t *out, uint8_t *scalar)
 {
   uint64_t bscalar[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar;
-    uint8_t *bj = scalar + i * (uint32_t)8U;
+    uint8_t *bj = scalar + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t q1[20U] = { 0U };
   uint64_t *gx = q1;
-  uint64_t *gy = q1 + (uint32_t)5U;
-  uint64_t *gz = q1 + (uint32_t)10U;
-  uint64_t *gt = q1 + (uint32_t)15U;
-  gx[0U] = (uint64_t)0x00062d608f25d51aU;
-  gx[1U] = (uint64_t)0x000412a4b4f6592aU;
-  gx[2U] = (uint64_t)0x00075b7171a4b31dU;
-  gx[3U] = (uint64_t)0x0001ff60527118feU;
-  gx[4U] = (uint64_t)0x000216936d3cd6e5U;
-  gy[0U] = (uint64_t)0x0006666666666658U;
-  gy[1U] = (uint64_t)0x0004ccccccccccccU;
-  gy[2U] = (uint64_t)0x0001999999999999U;
-  gy[3U] = (uint64_t)0x0003333333333333U;
-  gy[4U] = (uint64_t)0x0006666666666666U;
-  gz[0U] = (uint64_t)1U;
-  gz[1U] = (uint64_t)0U;
-  gz[2U] = (uint64_t)0U;
-  gz[3U] = (uint64_t)0U;
-  gz[4U] = (uint64_t)0U;
-  gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
-  gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
-  gt[2U] = (uint64_t)0x0002af8df483c27eU;
-  gt[3U] = (uint64_t)0x000332b375274732U;
-  gt[4U] = (uint64_t)0x00067875f0fd78b7U;
+  uint64_t *gy = q1 + 5U;
+  uint64_t *gz = q1 + 10U;
+  uint64_t *gt = q1 + 15U;
+  gx[0U] = 0x00062d608f25d51aULL;
+  gx[1U] = 0x000412a4b4f6592aULL;
+  gx[2U] = 0x00075b7171a4b31dULL;
+  gx[3U] = 0x0001ff60527118feULL;
+  gx[4U] = 0x000216936d3cd6e5ULL;
+  gy[0U] = 0x0006666666666658ULL;
+  gy[1U] = 0x0004ccccccccccccULL;
+  gy[2U] = 0x0001999999999999ULL;
+  gy[3U] = 0x0003333333333333ULL;
+  gy[4U] = 0x0006666666666666ULL;
+  gz[0U] = 1ULL;
+  gz[1U] = 0ULL;
+  gz[2U] = 0ULL;
+  gz[3U] = 0ULL;
+  gz[4U] = 0ULL;
+  gt[0U] = 0x00068ab3a5b7dda3ULL;
+  gt[1U] = 0x00000eea2a5eadbbULL;
+  gt[2U] = 0x0002af8df483c27eULL;
+  gt[3U] = 0x000332b375274732ULL;
+  gt[4U] = 0x00067875f0fd78b7ULL;
   uint64_t
   q2[20U] =
     {
-      (uint64_t)13559344787725U, (uint64_t)2051621493703448U, (uint64_t)1947659315640708U,
-      (uint64_t)626856790370168U, (uint64_t)1592804284034836U, (uint64_t)1781728767459187U,
-      (uint64_t)278818420518009U, (uint64_t)2038030359908351U, (uint64_t)910625973862690U,
-      (uint64_t)471887343142239U, (uint64_t)1298543306606048U, (uint64_t)794147365642417U,
-      (uint64_t)129968992326749U, (uint64_t)523140861678572U, (uint64_t)1166419653909231U,
-      (uint64_t)2009637196928390U, (uint64_t)1288020222395193U, (uint64_t)1007046974985829U,
-      (uint64_t)208981102651386U, (uint64_t)2074009315253380U
+      13559344787725ULL, 2051621493703448ULL, 1947659315640708ULL, 626856790370168ULL,
+      1592804284034836ULL, 1781728767459187ULL, 278818420518009ULL, 2038030359908351ULL,
+      910625973862690ULL, 471887343142239ULL, 1298543306606048ULL, 794147365642417ULL,
+      129968992326749ULL, 523140861678572ULL, 1166419653909231ULL, 2009637196928390ULL,
+      1288020222395193ULL, 1007046974985829ULL, 208981102651386ULL, 2074009315253380ULL
     };
   uint64_t
   q3[20U] =
     {
-      (uint64_t)557549315715710U, (uint64_t)196756086293855U, (uint64_t)846062225082495U,
-      (uint64_t)1865068224838092U, (uint64_t)991112090754908U, (uint64_t)522916421512828U,
-      (uint64_t)2098523346722375U, (uint64_t)1135633221747012U, (uint64_t)858420432114866U,
-      (uint64_t)186358544306082U, (uint64_t)1044420411868480U, (uint64_t)2080052304349321U,
-      (uint64_t)557301814716724U, (uint64_t)1305130257814057U, (uint64_t)2126012765451197U,
-      (uint64_t)1441004402875101U, (uint64_t)353948968859203U, (uint64_t)470765987164835U,
-      (uint64_t)1507675957683570U, (uint64_t)1086650358745097U
+      557549315715710ULL, 196756086293855ULL, 846062225082495ULL, 1865068224838092ULL,
+      991112090754908ULL, 522916421512828ULL, 2098523346722375ULL, 1135633221747012ULL,
+      858420432114866ULL, 186358544306082ULL, 1044420411868480ULL, 2080052304349321ULL,
+      557301814716724ULL, 1305130257814057ULL, 2126012765451197ULL, 1441004402875101ULL,
+      353948968859203ULL, 470765987164835ULL, 1507675957683570ULL, 1086650358745097ULL
     };
   uint64_t
   q4[20U] =
     {
-      (uint64_t)1129953239743101U, (uint64_t)1240339163956160U, (uint64_t)61002583352401U,
-      (uint64_t)2017604552196030U, (uint64_t)1576867829229863U, (uint64_t)1508654942849389U,
-      (uint64_t)270111619664077U, (uint64_t)1253097517254054U, (uint64_t)721798270973250U,
-      (uint64_t)161923365415298U, (uint64_t)828530877526011U, (uint64_t)1494851059386763U,
-      (uint64_t)662034171193976U, (uint64_t)1315349646974670U, (uint64_t)2199229517308806U,
-      (uint64_t)497078277852673U, (uint64_t)1310507715989956U, (uint64_t)1881315714002105U,
-      (uint64_t)2214039404983803U, (uint64_t)1331036420272667U
+      1129953239743101ULL, 1240339163956160ULL, 61002583352401ULL, 2017604552196030ULL,
+      1576867829229863ULL, 1508654942849389ULL, 270111619664077ULL, 1253097517254054ULL,
+      721798270973250ULL, 161923365415298ULL, 828530877526011ULL, 1494851059386763ULL,
+      662034171193976ULL, 1315349646974670ULL, 2199229517308806ULL, 497078277852673ULL,
+      1310507715989956ULL, 1881315714002105ULL, 2214039404983803ULL, 1331036420272667ULL
     };
   uint64_t *r1 = bscalar;
-  uint64_t *r2 = bscalar + (uint32_t)1U;
-  uint64_t *r3 = bscalar + (uint32_t)2U;
-  uint64_t *r4 = bscalar + (uint32_t)3U;
+  uint64_t *r2 = bscalar + 1U;
+  uint64_t *r3 = bscalar + 2U;
+  uint64_t *r4 = bscalar + 3U;
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(out);
   uint64_t tmp[20U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
+    0U,
+    16U,
+    1U,
+    KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
+    uint32_t k = 64U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
-    uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
+    uint32_t k0 = 64U - 4U * i - 4U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
-    uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
+    uint32_t k1 = 64U - 4U * i - 4U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
-    uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
+    uint32_t k2 = 64U - 4U * i - 4U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp););
-  KRML_HOST_IGNORE(q2);
-  KRML_HOST_IGNORE(q3);
-  KRML_HOST_IGNORE(q4);
+  KRML_MAYBE_UNUSED_VAR(q2);
+  KRML_MAYBE_UNUSED_VAR(q3);
+  KRML_MAYBE_UNUSED_VAR(q4);
 }
 
 static inline void
@@ -1481,48 +1411,48 @@ point_mul_g_double_vartime(uint64_t *out, uint8_t *scalar1, uint8_t *scalar2, ui
 {
   uint64_t tmp[28U] = { 0U };
   uint64_t *g = tmp;
-  uint64_t *bscalar1 = tmp + (uint32_t)20U;
-  uint64_t *bscalar2 = tmp + (uint32_t)24U;
+  uint64_t *bscalar1 = tmp + 20U;
+  uint64_t *bscalar2 = tmp + 24U;
   uint64_t *gx = g;
-  uint64_t *gy = g + (uint32_t)5U;
-  uint64_t *gz = g + (uint32_t)10U;
-  uint64_t *gt = g + (uint32_t)15U;
-  gx[0U] = (uint64_t)0x00062d608f25d51aU;
-  gx[1U] = (uint64_t)0x000412a4b4f6592aU;
-  gx[2U] = (uint64_t)0x00075b7171a4b31dU;
-  gx[3U] = (uint64_t)0x0001ff60527118feU;
-  gx[4U] = (uint64_t)0x000216936d3cd6e5U;
-  gy[0U] = (uint64_t)0x0006666666666658U;
-  gy[1U] = (uint64_t)0x0004ccccccccccccU;
-  gy[2U] = (uint64_t)0x0001999999999999U;
-  gy[3U] = (uint64_t)0x0003333333333333U;
-  gy[4U] = (uint64_t)0x0006666666666666U;
-  gz[0U] = (uint64_t)1U;
-  gz[1U] = (uint64_t)0U;
-  gz[2U] = (uint64_t)0U;
-  gz[3U] = (uint64_t)0U;
-  gz[4U] = (uint64_t)0U;
-  gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
-  gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
-  gt[2U] = (uint64_t)0x0002af8df483c27eU;
-  gt[3U] = (uint64_t)0x000332b375274732U;
-  gt[4U] = (uint64_t)0x00067875f0fd78b7U;
+  uint64_t *gy = g + 5U;
+  uint64_t *gz = g + 10U;
+  uint64_t *gt = g + 15U;
+  gx[0U] = 0x00062d608f25d51aULL;
+  gx[1U] = 0x000412a4b4f6592aULL;
+  gx[2U] = 0x00075b7171a4b31dULL;
+  gx[3U] = 0x0001ff60527118feULL;
+  gx[4U] = 0x000216936d3cd6e5ULL;
+  gy[0U] = 0x0006666666666658ULL;
+  gy[1U] = 0x0004ccccccccccccULL;
+  gy[2U] = 0x0001999999999999ULL;
+  gy[3U] = 0x0003333333333333ULL;
+  gy[4U] = 0x0006666666666666ULL;
+  gz[0U] = 1ULL;
+  gz[1U] = 0ULL;
+  gz[2U] = 0ULL;
+  gz[3U] = 0ULL;
+  gz[4U] = 0ULL;
+  gt[0U] = 0x00068ab3a5b7dda3ULL;
+  gt[1U] = 0x00000eea2a5eadbbULL;
+  gt[2U] = 0x0002af8df483c27eULL;
+  gt[3U] = 0x000332b375274732ULL;
+  gt[4U] = 0x00067875f0fd78b7ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar1;
-    uint8_t *bj = scalar1 + i * (uint32_t)8U;
+    uint8_t *bj = scalar1 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar2;
-    uint8_t *bj = scalar2 + i * (uint32_t)8U;
+    uint8_t *bj = scalar2 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -1530,58 +1460,50 @@ point_mul_g_double_vartime(uint64_t *out, uint8_t *scalar1, uint8_t *scalar2, ui
   uint64_t table2[640U] = { 0U };
   uint64_t tmp1[20U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)20U;
+  uint64_t *t1 = table2 + 20U;
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(t0);
-  memcpy(t1, q2, (uint32_t)20U * sizeof (uint64_t));
+  memcpy(t1, q2, 20U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)20U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 20U;
     Hacl_Impl_Ed25519_PointDouble_point_double(tmp1, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U,
-      tmp1,
-      (uint32_t)20U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U;
+    memcpy(table2 + (2U * i + 2U) * 20U, tmp1, 20U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 20U;
     Hacl_Impl_Ed25519_PointAdd_point_add(tmp1, q2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U,
-      tmp1,
-      (uint32_t)20U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 20U, tmp1, 20U * sizeof (uint64_t)););
   uint64_t tmp10[20U] = { 0U };
-  uint32_t i0 = (uint32_t)255U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar1, i0, (uint32_t)5U);
+  uint32_t i0 = 255U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
   const
   uint64_t
-  *a_bits_l = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)20U;
-  memcpy(out, (uint64_t *)a_bits_l, (uint32_t)20U * sizeof (uint64_t));
-  uint32_t i1 = (uint32_t)255U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar2, i1, (uint32_t)5U);
+  *a_bits_l = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 20U;
+  memcpy(out, (uint64_t *)a_bits_l, 20U * sizeof (uint64_t));
+  uint32_t i1 = 255U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
-  const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)20U;
-  memcpy(tmp10, (uint64_t *)a_bits_l0, (uint32_t)20U * sizeof (uint64_t));
+  const uint64_t *a_bits_l0 = table2 + bits_l320 * 20U;
+  memcpy(tmp10, (uint64_t *)a_bits_l0, 20U * sizeof (uint64_t));
   Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp10);
   uint64_t tmp11[20U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++)
+  for (uint32_t i = 0U; i < 51U; i++)
   {
-    KRML_MAYBE_FOR5(i2,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar2, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
+    uint32_t k = 255U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar2, k, 5U);
     uint32_t bits_l321 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)20U;
-    memcpy(tmp11, (uint64_t *)a_bits_l1, (uint32_t)20U * sizeof (uint64_t));
+    const uint64_t *a_bits_l1 = table2 + bits_l321 * 20U;
+    memcpy(tmp11, (uint64_t *)a_bits_l1, 20U * sizeof (uint64_t));
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp11);
-    uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar1, k0, (uint32_t)5U);
+    uint32_t k0 = 255U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar1, k0, 5U);
     uint32_t bits_l322 = (uint32_t)bits_l0;
     const
     uint64_t
-    *a_bits_l2 = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)20U;
-    memcpy(tmp11, (uint64_t *)a_bits_l2, (uint32_t)20U * sizeof (uint64_t));
+    *a_bits_l2 = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 20U;
+    memcpy(tmp11, (uint64_t *)a_bits_l2, 20U * sizeof (uint64_t));
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp11);
   }
 }
@@ -1609,13 +1531,13 @@ static inline void store_56(uint8_t *out, uint64_t *b)
   uint32_t b4_ = (uint32_t)b4;
   uint8_t *b8 = out;
   store64_le(b8, b0);
-  uint8_t *b80 = out + (uint32_t)7U;
+  uint8_t *b80 = out + 7U;
   store64_le(b80, b1);
-  uint8_t *b81 = out + (uint32_t)14U;
+  uint8_t *b81 = out + 14U;
   store64_le(b81, b2);
-  uint8_t *b82 = out + (uint32_t)21U;
+  uint8_t *b82 = out + 21U;
   store64_le(b82, b3);
-  store32_le(out + (uint32_t)28U, b4_);
+  store32_le(out + 28U, b4_);
 }
 
 static inline void load_64_bytes(uint64_t *out, uint8_t *b)
@@ -1623,39 +1545,39 @@ static inline void load_64_bytes(uint64_t *out, uint8_t *b)
   uint8_t *b80 = b;
   uint64_t u = load64_le(b80);
   uint64_t z = u;
-  uint64_t b0 = z & (uint64_t)0xffffffffffffffU;
-  uint8_t *b81 = b + (uint32_t)7U;
+  uint64_t b0 = z & 0xffffffffffffffULL;
+  uint8_t *b81 = b + 7U;
   uint64_t u0 = load64_le(b81);
   uint64_t z0 = u0;
-  uint64_t b1 = z0 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b82 = b + (uint32_t)14U;
+  uint64_t b1 = z0 & 0xffffffffffffffULL;
+  uint8_t *b82 = b + 14U;
   uint64_t u1 = load64_le(b82);
   uint64_t z1 = u1;
-  uint64_t b2 = z1 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b83 = b + (uint32_t)21U;
+  uint64_t b2 = z1 & 0xffffffffffffffULL;
+  uint8_t *b83 = b + 21U;
   uint64_t u2 = load64_le(b83);
   uint64_t z2 = u2;
-  uint64_t b3 = z2 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b84 = b + (uint32_t)28U;
+  uint64_t b3 = z2 & 0xffffffffffffffULL;
+  uint8_t *b84 = b + 28U;
   uint64_t u3 = load64_le(b84);
   uint64_t z3 = u3;
-  uint64_t b4 = z3 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b85 = b + (uint32_t)35U;
+  uint64_t b4 = z3 & 0xffffffffffffffULL;
+  uint8_t *b85 = b + 35U;
   uint64_t u4 = load64_le(b85);
   uint64_t z4 = u4;
-  uint64_t b5 = z4 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b86 = b + (uint32_t)42U;
+  uint64_t b5 = z4 & 0xffffffffffffffULL;
+  uint8_t *b86 = b + 42U;
   uint64_t u5 = load64_le(b86);
   uint64_t z5 = u5;
-  uint64_t b6 = z5 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b87 = b + (uint32_t)49U;
+  uint64_t b6 = z5 & 0xffffffffffffffULL;
+  uint8_t *b87 = b + 49U;
   uint64_t u6 = load64_le(b87);
   uint64_t z6 = u6;
-  uint64_t b7 = z6 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b8 = b + (uint32_t)56U;
+  uint64_t b7 = z6 & 0xffffffffffffffULL;
+  uint8_t *b8 = b + 56U;
   uint64_t u7 = load64_le(b8);
   uint64_t z7 = u7;
-  uint64_t b88 = z7 & (uint64_t)0xffffffffffffffU;
+  uint64_t b88 = z7 & 0xffffffffffffffULL;
   uint8_t b63 = b[63U];
   uint64_t b9 = (uint64_t)b63;
   out[0U] = b0;
@@ -1675,20 +1597,20 @@ static inline void load_32_bytes(uint64_t *out, uint8_t *b)
   uint8_t *b80 = b;
   uint64_t u0 = load64_le(b80);
   uint64_t z = u0;
-  uint64_t b0 = z & (uint64_t)0xffffffffffffffU;
-  uint8_t *b81 = b + (uint32_t)7U;
+  uint64_t b0 = z & 0xffffffffffffffULL;
+  uint8_t *b81 = b + 7U;
   uint64_t u1 = load64_le(b81);
   uint64_t z0 = u1;
-  uint64_t b1 = z0 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b82 = b + (uint32_t)14U;
+  uint64_t b1 = z0 & 0xffffffffffffffULL;
+  uint8_t *b82 = b + 14U;
   uint64_t u2 = load64_le(b82);
   uint64_t z1 = u2;
-  uint64_t b2 = z1 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b8 = b + (uint32_t)21U;
+  uint64_t b2 = z1 & 0xffffffffffffffULL;
+  uint8_t *b8 = b + 21U;
   uint64_t u3 = load64_le(b8);
   uint64_t z2 = u3;
-  uint64_t b3 = z2 & (uint64_t)0xffffffffffffffU;
-  uint32_t u = load32_le(b + (uint32_t)28U);
+  uint64_t b3 = z2 & 0xffffffffffffffULL;
+  uint32_t u = load32_le(b + 28U);
   uint32_t b4 = u;
   uint64_t b41 = (uint64_t)b4;
   out[0U] = b0;
@@ -1703,15 +1625,14 @@ static inline void sha512_pre_msg(uint8_t *hash, uint8_t *prefix, uint32_t len,
   uint8_t buf[128U] = { 0U };
   uint64_t block_state[8U] = { 0U };
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64 p = s;
   Hacl_SHA2_Scalar32_sha512_init(block_state);
   Hacl_Streaming_MD_state_64 *st = &p;
-  Hacl_Streaming_Types_error_code
-  err0 = Hacl_Streaming_SHA2_update_512(st, prefix, (uint32_t)32U);
+  Hacl_Streaming_Types_error_code err0 = Hacl_Streaming_SHA2_update_512(st, prefix, 32U);
   Hacl_Streaming_Types_error_code err1 = Hacl_Streaming_SHA2_update_512(st, input, len);
-  KRML_HOST_IGNORE(err0);
-  KRML_HOST_IGNORE(err1);
+  KRML_MAYBE_UNUSED_VAR(err0);
+  KRML_MAYBE_UNUSED_VAR(err1);
   Hacl_Streaming_SHA2_finish_512(st, hash);
 }
 
@@ -1727,18 +1648,16 @@ sha512_pre_pre2_msg(
   uint8_t buf[128U] = { 0U };
   uint64_t block_state[8U] = { 0U };
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64 p = s;
   Hacl_SHA2_Scalar32_sha512_init(block_state);
   Hacl_Streaming_MD_state_64 *st = &p;
-  Hacl_Streaming_Types_error_code
-  err0 = Hacl_Streaming_SHA2_update_512(st, prefix, (uint32_t)32U);
-  Hacl_Streaming_Types_error_code
-  err1 = Hacl_Streaming_SHA2_update_512(st, prefix2, (uint32_t)32U);
+  Hacl_Streaming_Types_error_code err0 = Hacl_Streaming_SHA2_update_512(st, prefix, 32U);
+  Hacl_Streaming_Types_error_code err1 = Hacl_Streaming_SHA2_update_512(st, prefix2, 32U);
   Hacl_Streaming_Types_error_code err2 = Hacl_Streaming_SHA2_update_512(st, input, len);
-  KRML_HOST_IGNORE(err0);
-  KRML_HOST_IGNORE(err1);
-  KRML_HOST_IGNORE(err2);
+  KRML_MAYBE_UNUSED_VAR(err0);
+  KRML_MAYBE_UNUSED_VAR(err1);
+  KRML_MAYBE_UNUSED_VAR(err2);
   Hacl_Streaming_SHA2_finish_512(st, hash);
 }
 
@@ -1777,12 +1696,12 @@ static inline void point_mul_g_compress(uint8_t *out, uint8_t *s)
 
 static inline void secret_expand(uint8_t *expanded, uint8_t *secret)
 {
-  Hacl_Streaming_SHA2_hash_512(secret, (uint32_t)32U, expanded);
+  Hacl_Streaming_SHA2_hash_512(secret, 32U, expanded);
   uint8_t *h_low = expanded;
   uint8_t h_low0 = h_low[0U];
   uint8_t h_low31 = h_low[31U];
-  h_low[0U] = h_low0 & (uint8_t)0xf8U;
-  h_low[31U] = (h_low31 & (uint8_t)127U) | (uint8_t)64U;
+  h_low[0U] = (uint32_t)h_low0 & 0xf8U;
+  h_low[31U] = ((uint32_t)h_low31 & 127U) | 64U;
 }
 
 /********************************************************************************
@@ -1816,8 +1735,8 @@ Compute the expanded keys for an Ed25519 signature.
 void Hacl_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key)
 {
   uint8_t *public_key = expanded_keys;
-  uint8_t *s_prefix = expanded_keys + (uint32_t)32U;
-  uint8_t *s = expanded_keys + (uint32_t)32U;
+  uint8_t *s_prefix = expanded_keys + 32U;
+  uint8_t *s = expanded_keys + 32U;
   secret_expand(s_prefix, private_key);
   point_mul_g_compress(public_key, s);
 }
@@ -1843,13 +1762,13 @@ Hacl_Ed25519_sign_expanded(
 )
 {
   uint8_t *rs = signature;
-  uint8_t *ss = signature + (uint32_t)32U;
+  uint8_t *ss = signature + 32U;
   uint64_t rq[5U] = { 0U };
   uint64_t hq[5U] = { 0U };
   uint8_t rb[32U] = { 0U };
   uint8_t *public_key = expanded_keys;
-  uint8_t *s = expanded_keys + (uint32_t)32U;
-  uint8_t *prefix = expanded_keys + (uint32_t)64U;
+  uint8_t *s = expanded_keys + 32U;
+  uint8_t *prefix = expanded_keys + 64U;
   sha512_modq_pre(rq, prefix, msg_len, msg);
   store_56(rb, rq);
   point_mul_g_compress(rs, rb);
@@ -1904,7 +1823,7 @@ Hacl_Ed25519_verify(uint8_t *public_key, uint32_t msg_len, uint8_t *msg, uint8_t
     {
       uint8_t hb[32U] = { 0U };
       uint8_t *rs1 = signature;
-      uint8_t *sb = signature + (uint32_t)32U;
+      uint8_t *sb = signature + 32U;
       uint64_t tmp[5U] = { 0U };
       load_32_bytes(tmp, sb);
       bool b1 = gte_q(tmp);
diff --git a/src/msvc/Hacl_FFDHE.c b/src/msvc/Hacl_FFDHE.c
index bc77dbdc..a2cdfa52 100644
--- a/src/msvc/Hacl_FFDHE.c
+++ b/src/msvc/Hacl_FFDHE.c
@@ -35,23 +35,23 @@ static inline uint32_t ffdhe_len(Spec_FFDHE_ffdhe_alg a)
   {
     case Spec_FFDHE_FFDHE2048:
       {
-        return (uint32_t)256U;
+        return 256U;
       }
     case Spec_FFDHE_FFDHE3072:
       {
-        return (uint32_t)384U;
+        return 384U;
       }
     case Spec_FFDHE_FFDHE4096:
       {
-        return (uint32_t)512U;
+        return 512U;
       }
     case Spec_FFDHE_FFDHE6144:
       {
-        return (uint32_t)768U;
+        return 768U;
       }
     case Spec_FFDHE_FFDHE8192:
       {
-        return (uint32_t)1024U;
+        return 1024U;
       }
     default:
       {
@@ -63,12 +63,46 @@ static inline uint32_t ffdhe_len(Spec_FFDHE_ffdhe_alg a)
 
 static inline void ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a, uint64_t *p_r2_n)
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   uint64_t *p_n = p_r2_n;
   uint64_t *r2_n = p_r2_n + nLen;
-  KRML_CHECK_SIZE(sizeof (uint8_t), ffdhe_len(a));
-  uint8_t *p_s = (uint8_t *)alloca(ffdhe_len(a) * sizeof (uint8_t));
-  memset(p_s, 0U, ffdhe_len(a) * sizeof (uint8_t));
+  uint32_t sw;
+  switch (a)
+  {
+    case Spec_FFDHE_FFDHE2048:
+      {
+        sw = 256U;
+        break;
+      }
+    case Spec_FFDHE_FFDHE3072:
+      {
+        sw = 384U;
+        break;
+      }
+    case Spec_FFDHE_FFDHE4096:
+      {
+        sw = 512U;
+        break;
+      }
+    case Spec_FFDHE_FFDHE6144:
+      {
+        sw = 768U;
+        break;
+      }
+    case Spec_FFDHE_FFDHE8192:
+      {
+        sw = 1024U;
+        break;
+      }
+    default:
+      {
+        KRML_HOST_EPRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__);
+        KRML_HOST_EXIT(253U);
+      }
+  }
+  KRML_CHECK_SIZE(sizeof (uint8_t), sw);
+  uint8_t *p_s = (uint8_t *)alloca(sw * sizeof (uint8_t));
+  memset(p_s, 0U, sw * sizeof (uint8_t));
   const uint8_t *p;
   switch (a)
   {
@@ -104,88 +138,80 @@ static inline void ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a, uint64_t *p_r2_n)
       }
   }
   uint32_t len = ffdhe_len(a);
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint8_t *os = p_s;
     uint8_t x = p[i];
     os[i] = x;
   }
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ffdhe_len(a), p_s, p_n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((ffdhe_len(a) - (uint32_t)1U)
-    / (uint32_t)8U
-    + (uint32_t)1U,
-    (uint32_t)8U * ffdhe_len(a) - (uint32_t)1U,
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((ffdhe_len(a) - 1U) / 8U + 1U,
+    8U * ffdhe_len(a) - 1U,
     p_n,
     r2_n);
 }
 
 static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, uint64_t *p_n)
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *p_n1 = (uint64_t *)alloca(nLen * sizeof (uint64_t));
   memset(p_n1, 0U, nLen * sizeof (uint64_t));
-  uint64_t
-  c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, p_n[0U], (uint64_t)1U, p_n1);
-  if ((uint32_t)1U < nLen)
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, p_n[0U], 1ULL, p_n1);
+  if (1U < nLen)
   {
-    uint64_t *a1 = p_n + (uint32_t)1U;
-    uint64_t *res1 = p_n1 + (uint32_t)1U;
+    uint64_t *a1 = p_n + 1U;
+    uint64_t *res1 = p_n1 + 1U;
     uint64_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (nLen - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (nLen - 1U) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
     }
-    for
-    (uint32_t
-      i = (nLen - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < nLen - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (nLen - 1U) / 4U * 4U; i < nLen - 1U; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c1 = c;
-    KRML_HOST_IGNORE(c1);
+    KRML_MAYBE_UNUSED_VAR(c1);
   }
   else
   {
-    KRML_HOST_IGNORE(c0);
+    KRML_MAYBE_UNUSED_VAR(c0);
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *b2 = (uint64_t *)alloca(nLen * sizeof (uint64_t));
   memset(b2, 0U, nLen * sizeof (uint64_t));
-  uint32_t i0 = (uint32_t)0U;
-  uint32_t j = (uint32_t)0U;
-  b2[i0] = b2[i0] | (uint64_t)1U << j;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < nLen; i++)
+  uint32_t i0 = 0U;
+  uint32_t j = 0U;
+  b2[i0] = b2[i0] | 1ULL << j;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < nLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(b2[i], pk_n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], pk_n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t res = acc0;
   uint64_t m0 = res;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < nLen; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < nLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(pk_n[i], p_n1[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(pk_n[i], p_n1[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   return m0 & m1;
@@ -200,21 +226,19 @@ ffdhe_compute_exp(
   uint8_t *res
 )
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   uint64_t *p_n = p_r2_n;
   uint64_t *r2_n = p_r2_n + nLen;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *res_n = (uint64_t *)alloca(nLen * sizeof (uint64_t));
   memset(res_n, 0U, nLen * sizeof (uint64_t));
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(p_n[0U]);
-  Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((ffdhe_len(a) - (uint32_t)1U)
-    / (uint32_t)8U
-    + (uint32_t)1U,
+  Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((ffdhe_len(a) - 1U) / 8U + 1U,
     p_n,
     mu,
     r2_n,
     b_n,
-    (uint32_t)64U * nLen,
+    64U * nLen,
     sk_n,
     res_n);
   Hacl_Bignum_Convert_bn_to_bytes_be_uint64(ffdhe_len(a), res_n, res);
@@ -227,7 +251,7 @@ uint32_t Hacl_FFDHE_ffdhe_len(Spec_FFDHE_ffdhe_alg a)
 
 uint64_t *Hacl_FFDHE_new_ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a)
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen);
   uint64_t *res = (uint64_t *)KRML_HOST_CALLOC(nLen + nLen, sizeof (uint64_t));
   if (res == NULL)
@@ -249,17 +273,17 @@ Hacl_FFDHE_ffdhe_secret_to_public_precomp(
 )
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *g_n = (uint64_t *)alloca(nLen * sizeof (uint64_t));
   memset(g_n, 0U, nLen * sizeof (uint64_t));
-  uint8_t g = (uint8_t)0U;
+  uint8_t g = 0U;
   {
     uint8_t *os = &g;
     uint8_t x = Hacl_Impl_FFDHE_Constants_ffdhe_g2[0U];
     os[0U] = x;
   }
-  Hacl_Bignum_Convert_bn_from_bytes_be_uint64((uint32_t)1U, &g, g_n);
+  Hacl_Bignum_Convert_bn_from_bytes_be_uint64(1U, &g, g_n);
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *sk_n = (uint64_t *)alloca(nLen * sizeof (uint64_t));
   memset(sk_n, 0U, nLen * sizeof (uint64_t));
@@ -270,7 +294,7 @@ Hacl_FFDHE_ffdhe_secret_to_public_precomp(
 void Hacl_FFDHE_ffdhe_secret_to_public(Spec_FFDHE_ffdhe_alg a, uint8_t *sk, uint8_t *pk)
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen);
   uint64_t *p_r2_n = (uint64_t *)alloca((nLen + nLen) * sizeof (uint64_t));
   memset(p_r2_n, 0U, (nLen + nLen) * sizeof (uint64_t));
@@ -288,7 +312,7 @@ Hacl_FFDHE_ffdhe_shared_secret_precomp(
 )
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   uint64_t *p_n = p_r2_n;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *sk_n = (uint64_t *)alloca(nLen * sizeof (uint64_t));
@@ -299,7 +323,7 @@ Hacl_FFDHE_ffdhe_shared_secret_precomp(
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(len, sk, sk_n);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(len, pk, pk_n);
   uint64_t m = ffdhe_check_pk(a, pk_n, p_n);
-  if (m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  if (m == 0xFFFFFFFFFFFFFFFFULL)
   {
     ffdhe_compute_exp(a, p_r2_n, sk_n, pk_n, ss);
   }
@@ -310,7 +334,7 @@ uint64_t
 Hacl_FFDHE_ffdhe_shared_secret(Spec_FFDHE_ffdhe_alg a, uint8_t *sk, uint8_t *pk, uint8_t *ss)
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen);
   uint64_t *p_n = (uint64_t *)alloca((nLen + nLen) * sizeof (uint64_t));
   memset(p_n, 0U, (nLen + nLen) * sizeof (uint64_t));
diff --git a/src/msvc/Hacl_Frodo1344.c b/src/msvc/Hacl_Frodo1344.c
index 2951f848..01e8e007 100644
--- a/src/msvc/Hacl_Frodo1344.c
+++ b/src/msvc/Hacl_Frodo1344.c
@@ -29,151 +29,113 @@
 #include "internal/Hacl_Frodo_KEM.h"
 #include "lib_memzero0.h"
 
-uint32_t Hacl_Frodo1344_crypto_bytes = (uint32_t)32U;
+uint32_t Hacl_Frodo1344_crypto_bytes = 32U;
 
-uint32_t Hacl_Frodo1344_crypto_publickeybytes = (uint32_t)21520U;
+uint32_t Hacl_Frodo1344_crypto_publickeybytes = 21520U;
 
-uint32_t Hacl_Frodo1344_crypto_secretkeybytes = (uint32_t)43088U;
+uint32_t Hacl_Frodo1344_crypto_secretkeybytes = 43088U;
 
-uint32_t Hacl_Frodo1344_crypto_ciphertextbytes = (uint32_t)21632U;
+uint32_t Hacl_Frodo1344_crypto_ciphertextbytes = 21632U;
 
 uint32_t Hacl_Frodo1344_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[80U] = { 0U };
-  randombytes_((uint32_t)80U, coins);
+  randombytes_(80U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)32U;
-  uint8_t *z = coins + (uint32_t)64U;
+  uint8_t *seed_se = coins + 32U;
+  uint8_t *z = coins + 64U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake256_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)21552U;
+  Hacl_SHA3_shake256_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 21552U;
   uint16_t s_matrix[10752U] = { 0U };
   uint16_t e_matrix[10752U] = { 0U };
   uint8_t r[43008U] = { 0U };
   uint8_t shake_input_seed_se[33U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43008U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)1344U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)1344U,
-    (uint32_t)8U,
-    r + (uint32_t)21504U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(33U, shake_input_seed_se, 43008U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(1344U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(1344U, 8U, r + 21504U, e_matrix);
   uint16_t b_matrix[10752U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)1806336U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 1806336U);
   uint16_t a_matrix[1806336U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)1344U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)1344U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)1344U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)1344U,
-    (uint32_t)8U,
-    (uint32_t)16U,
-    b_matrix,
-    b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)1344U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)10752U, uint16_t);
-  uint32_t slen1 = (uint32_t)43056U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 1344U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(1344U, 1344U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(1344U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(1344U, 8U, 16U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(1344U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 10752U, uint16_t);
+  uint32_t slen1 = 43056U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)32U, pk, (uint32_t)21520U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)21520U, pk, (uint32_t)32U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)80U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 32U * sizeof (uint8_t));
+  memcpy(sk_p + 32U, pk, 21520U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(21520U, pk, 32U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 80U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo1344_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[32U] = { 0U };
-  randombytes_((uint32_t)32U, coins);
+  randombytes_(32U, coins);
   uint8_t seed_se_k[64U] = { 0U };
   uint8_t pkh_mu[64U] = { 0U };
-  Hacl_SHA3_shake256_hacl((uint32_t)21520U, pk, (uint32_t)32U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)32U, coins, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)64U, pkh_mu, (uint32_t)64U, seed_se_k);
+  Hacl_SHA3_shake256_hacl(21520U, pk, 32U, pkh_mu);
+  memcpy(pkh_mu + 32U, coins, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(64U, pkh_mu, 64U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)32U;
+  uint8_t *k = seed_se_k + 32U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[10752U] = { 0U };
   uint16_t ep_matrix[10752U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[43136U] = { 0U };
   uint8_t shake_input_seed_se[33U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43136U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, (uint32_t)1344U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)1344U,
-    r + (uint32_t)21504U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)43008U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(33U, shake_input_seed_se, 43136U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r + 21504U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 8U, r + 43008U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)21504U;
+  uint8_t *c2 = ct + 21504U;
   uint16_t bp_matrix[10752U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)1806336U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 1806336U);
   uint16_t a_matrix[1806336U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)1344U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)1344U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)1344U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 1344U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 1344U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 1344U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 1344U, 16U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[10752U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)1344U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(1344U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)4U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)21664U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 4U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 16U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 21664U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *shake_input_ss = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)21632U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)21632U, k, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, (uint32_t)32U, ss);
+  memcpy(shake_input_ss, ct, 21632U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 21632U, k, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, 32U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)64U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)32U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t);
+  Lib_Memzero0_memzero(coins, 32U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -181,39 +143,30 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[10752U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)21504U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, c2, c_matrix);
+  uint8_t *c2 = ct + 21504U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 1344U, 16U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 16U, c2, c_matrix);
   uint8_t mu_decode[32U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)21552U;
+  uint8_t *s_bytes = sk + 21552U;
   uint16_t s_matrix[10752U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)1344U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)16U,
-    (uint32_t)4U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(1344U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 1344U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(16U, 4U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[64U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)64U;
+  uint32_t pkh_mu_decode_len = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t *pkh_mu_decode = (uint8_t *)alloca(pkh_mu_decode_len * sizeof (uint8_t));
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)43056U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)32U, mu_decode, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)64U, seed_se_k);
+  uint8_t *pkh = sk + 43056U;
+  memcpy(pkh_mu_decode, pkh, 32U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 32U, mu_decode, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, 64U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)32U;
+  uint8_t *kp = seed_se_k + 32U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[10752U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -222,80 +175,58 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[43136U] = { 0U };
   uint8_t shake_input_seed_se[33U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43136U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, (uint32_t)1344U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)1344U,
-    r + (uint32_t)21504U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)43008U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)32U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(33U, shake_input_seed_se, 43136U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r + 21504U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 8U, r + 43008U, epp_matrix);
+  uint8_t *pk = sk + 32U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)1806336U);
+  uint8_t *b = pk + 16U;
+  KRML_CHECK_SIZE(sizeof (uint16_t), 1806336U);
   uint16_t a_matrix[1806336U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)1344U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)1344U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)1344U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 1344U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 1344U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 1344U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[10752U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)1344U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(1344U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)4U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)1344U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 4U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 1344U, 16U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 16U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 1344U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[32U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;
   }
-  uint32_t ss_init_len = (uint32_t)21664U;
+  uint32_t ss_init_len = 21664U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *ss_init = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)21632U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)21632U, kp_s, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, (uint32_t)32U, ss);
+  memcpy(ss_init, ct, 21632U * sizeof (uint8_t));
+  memcpy(ss_init + 21632U, kp_s, 32U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, 32U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)64U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)32U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 32U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 32U, uint8_t);
+  return 0U;
 }
 
diff --git a/src/msvc/Hacl_Frodo64.c b/src/msvc/Hacl_Frodo64.c
index 45ee9dd6..7492b48c 100644
--- a/src/msvc/Hacl_Frodo64.c
+++ b/src/msvc/Hacl_Frodo64.c
@@ -34,145 +34,111 @@
  */
 
 
-uint32_t Hacl_Frodo64_crypto_bytes = (uint32_t)16U;
+uint32_t Hacl_Frodo64_crypto_bytes = 16U;
 
-uint32_t Hacl_Frodo64_crypto_publickeybytes = (uint32_t)976U;
+uint32_t Hacl_Frodo64_crypto_publickeybytes = 976U;
 
-uint32_t Hacl_Frodo64_crypto_secretkeybytes = (uint32_t)2032U;
+uint32_t Hacl_Frodo64_crypto_secretkeybytes = 2032U;
 
-uint32_t Hacl_Frodo64_crypto_ciphertextbytes = (uint32_t)1080U;
+uint32_t Hacl_Frodo64_crypto_ciphertextbytes = 1080U;
 
 uint32_t Hacl_Frodo64_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[48U] = { 0U };
-  randombytes_((uint32_t)48U, coins);
+  randombytes_(48U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)16U;
-  uint8_t *z = coins + (uint32_t)32U;
+  uint8_t *seed_se = coins + 16U;
+  uint8_t *z = coins + 32U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake128_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)992U;
+  Hacl_SHA3_shake128_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 992U;
   uint16_t s_matrix[512U] = { 0U };
   uint16_t e_matrix[512U] = { 0U };
   uint8_t r[2048U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2048U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)64U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)64U,
-    (uint32_t)8U,
-    r + (uint32_t)1024U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(17U, shake_input_seed_se, 2048U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(64U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(64U, 8U, r + 1024U, e_matrix);
   uint16_t b_matrix[512U] = { 0U };
   uint16_t a_matrix[4096U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)64U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)64U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)64U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b_matrix, b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)64U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)512U, uint16_t);
-  uint32_t slen1 = (uint32_t)2016U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 64U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(64U, 64U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(64U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(64U, 8U, 15U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(64U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 512U, uint16_t);
+  uint32_t slen1 = 2016U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)16U, pk, (uint32_t)976U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)976U, pk, (uint32_t)16U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)48U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 16U * sizeof (uint8_t));
+  memcpy(sk_p + 16U, pk, 976U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(976U, pk, 16U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 48U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo64_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[16U] = { 0U };
-  randombytes_((uint32_t)16U, coins);
+  randombytes_(16U, coins);
   uint8_t seed_se_k[32U] = { 0U };
   uint8_t pkh_mu[32U] = { 0U };
-  Hacl_SHA3_shake128_hacl((uint32_t)976U, pk, (uint32_t)16U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)16U, coins, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)32U, pkh_mu, (uint32_t)32U, seed_se_k);
+  Hacl_SHA3_shake128_hacl(976U, pk, 16U, pkh_mu);
+  memcpy(pkh_mu + 16U, coins, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(32U, pkh_mu, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)16U;
+  uint8_t *k = seed_se_k + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[512U] = { 0U };
   uint16_t ep_matrix[512U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[2176U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2176U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, (uint32_t)64U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)64U,
-    r + (uint32_t)1024U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)2048U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(17U, shake_input_seed_se, 2176U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r + 1024U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 8U, r + 2048U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)960U;
+  uint8_t *c2 = ct + 960U;
   uint16_t bp_matrix[512U] = { 0U };
   uint16_t a_matrix[4096U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)64U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)64U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)64U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 64U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 64U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 64U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 64U, 15U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[512U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(64U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)1096U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 15U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 1096U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *shake_input_ss = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)1080U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)1080U, k, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, (uint32_t)16U, ss);
+  memcpy(shake_input_ss, ct, 1080U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 1080U, k, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, 16U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(coins, 16U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -180,39 +146,30 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[512U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)960U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, c2, c_matrix);
+  uint8_t *c2 = ct + 960U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 64U, 15U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 15U, c2, c_matrix);
   uint8_t mu_decode[16U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)992U;
+  uint8_t *s_bytes = sk + 992U;
   uint16_t s_matrix[512U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)64U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(64U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 64U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(15U, 2U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[32U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)32U;
+  uint32_t pkh_mu_decode_len = 32U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t *pkh_mu_decode = (uint8_t *)alloca(pkh_mu_decode_len * sizeof (uint8_t));
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)2016U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)16U, mu_decode, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)32U, seed_se_k);
+  uint8_t *pkh = sk + 2016U;
+  memcpy(pkh_mu_decode, pkh, 16U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 16U, mu_decode, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)16U;
+  uint8_t *kp = seed_se_k + 16U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[512U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -221,80 +178,58 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[2176U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2176U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, (uint32_t)64U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)64U,
-    r + (uint32_t)1024U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)2048U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)16U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(17U, shake_input_seed_se, 2176U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r + 1024U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 8U, r + 2048U, epp_matrix);
+  uint8_t *pk = sk + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t a_matrix[4096U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)64U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)64U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)64U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 64U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 64U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 64U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[512U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(64U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)64U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 64U, 15U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 15U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 64U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;);
-  uint32_t ss_init_len = (uint32_t)1096U;
+  uint32_t ss_init_len = 1096U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *ss_init = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)1080U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)1080U, kp_s, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, (uint32_t)16U, ss);
+  memcpy(ss_init, ct, 1080U * sizeof (uint8_t));
+  memcpy(ss_init + 1080U, kp_s, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, 16U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)16U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 16U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 16U, uint8_t);
+  return 0U;
 }
 
diff --git a/src/msvc/Hacl_Frodo640.c b/src/msvc/Hacl_Frodo640.c
index badd2bae..efe12ece 100644
--- a/src/msvc/Hacl_Frodo640.c
+++ b/src/msvc/Hacl_Frodo640.c
@@ -29,151 +29,113 @@
 #include "internal/Hacl_Frodo_KEM.h"
 #include "lib_memzero0.h"
 
-uint32_t Hacl_Frodo640_crypto_bytes = (uint32_t)16U;
+uint32_t Hacl_Frodo640_crypto_bytes = 16U;
 
-uint32_t Hacl_Frodo640_crypto_publickeybytes = (uint32_t)9616U;
+uint32_t Hacl_Frodo640_crypto_publickeybytes = 9616U;
 
-uint32_t Hacl_Frodo640_crypto_secretkeybytes = (uint32_t)19888U;
+uint32_t Hacl_Frodo640_crypto_secretkeybytes = 19888U;
 
-uint32_t Hacl_Frodo640_crypto_ciphertextbytes = (uint32_t)9720U;
+uint32_t Hacl_Frodo640_crypto_ciphertextbytes = 9720U;
 
 uint32_t Hacl_Frodo640_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[48U] = { 0U };
-  randombytes_((uint32_t)48U, coins);
+  randombytes_(48U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)16U;
-  uint8_t *z = coins + (uint32_t)32U;
+  uint8_t *seed_se = coins + 16U;
+  uint8_t *z = coins + 32U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake128_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)9632U;
+  Hacl_SHA3_shake128_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 9632U;
   uint16_t s_matrix[5120U] = { 0U };
   uint16_t e_matrix[5120U] = { 0U };
   uint8_t r[20480U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20480U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)640U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)640U,
-    (uint32_t)8U,
-    r + (uint32_t)10240U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(17U, shake_input_seed_se, 20480U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(640U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(640U, 8U, r + 10240U, e_matrix);
   uint16_t b_matrix[5120U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)409600U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 409600U);
   uint16_t a_matrix[409600U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)640U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)640U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)640U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)640U,
-    (uint32_t)8U,
-    (uint32_t)15U,
-    b_matrix,
-    b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)640U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)5120U, uint16_t);
-  uint32_t slen1 = (uint32_t)19872U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 640U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(640U, 640U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(640U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(640U, 8U, 15U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(640U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 5120U, uint16_t);
+  uint32_t slen1 = 19872U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)16U, pk, (uint32_t)9616U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)9616U, pk, (uint32_t)16U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)48U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 16U * sizeof (uint8_t));
+  memcpy(sk_p + 16U, pk, 9616U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(9616U, pk, 16U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 48U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo640_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[16U] = { 0U };
-  randombytes_((uint32_t)16U, coins);
+  randombytes_(16U, coins);
   uint8_t seed_se_k[32U] = { 0U };
   uint8_t pkh_mu[32U] = { 0U };
-  Hacl_SHA3_shake128_hacl((uint32_t)9616U, pk, (uint32_t)16U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)16U, coins, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)32U, pkh_mu, (uint32_t)32U, seed_se_k);
+  Hacl_SHA3_shake128_hacl(9616U, pk, 16U, pkh_mu);
+  memcpy(pkh_mu + 16U, coins, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(32U, pkh_mu, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)16U;
+  uint8_t *k = seed_se_k + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[5120U] = { 0U };
   uint16_t ep_matrix[5120U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[20608U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20608U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, (uint32_t)640U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)640U,
-    r + (uint32_t)10240U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)20480U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(17U, shake_input_seed_se, 20608U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r + 10240U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 8U, r + 20480U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)9600U;
+  uint8_t *c2 = ct + 9600U;
   uint16_t bp_matrix[5120U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)409600U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 409600U);
   uint16_t a_matrix[409600U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)640U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)640U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)640U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 640U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 640U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 640U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 640U, 15U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[5120U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)640U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(640U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)9736U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 15U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 9736U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *shake_input_ss = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)9720U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)9720U, k, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, (uint32_t)16U, ss);
+  memcpy(shake_input_ss, ct, 9720U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 9720U, k, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, 16U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(coins, 16U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -181,39 +143,30 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[5120U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)9600U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, c2, c_matrix);
+  uint8_t *c2 = ct + 9600U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 640U, 15U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 15U, c2, c_matrix);
   uint8_t mu_decode[16U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)9632U;
+  uint8_t *s_bytes = sk + 9632U;
   uint16_t s_matrix[5120U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)640U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(640U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 640U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(15U, 2U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[32U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)32U;
+  uint32_t pkh_mu_decode_len = 32U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t *pkh_mu_decode = (uint8_t *)alloca(pkh_mu_decode_len * sizeof (uint8_t));
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)19872U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)16U, mu_decode, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)32U, seed_se_k);
+  uint8_t *pkh = sk + 19872U;
+  memcpy(pkh_mu_decode, pkh, 16U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 16U, mu_decode, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)16U;
+  uint8_t *kp = seed_se_k + 16U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[5120U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -222,81 +175,59 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[20608U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20608U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, (uint32_t)640U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)640U,
-    r + (uint32_t)10240U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)20480U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)16U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(17U, shake_input_seed_se, 20608U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r + 10240U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 8U, r + 20480U, epp_matrix);
+  uint8_t *pk = sk + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)409600U);
+  uint8_t *b = pk + 16U;
+  KRML_CHECK_SIZE(sizeof (uint16_t), 409600U);
   uint16_t a_matrix[409600U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)640U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)640U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)640U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 640U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 640U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 640U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[5120U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)640U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(640U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)640U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 640U, 15U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 15U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 640U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;);
-  uint32_t ss_init_len = (uint32_t)9736U;
+  uint32_t ss_init_len = 9736U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *ss_init = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)9720U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)9720U, kp_s, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, (uint32_t)16U, ss);
+  memcpy(ss_init, ct, 9720U * sizeof (uint8_t));
+  memcpy(ss_init + 9720U, kp_s, 16U * sizeof (uint8_t));
+  Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, 16U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)16U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 16U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 16U, uint8_t);
+  return 0U;
 }
 
diff --git a/src/msvc/Hacl_Frodo976.c b/src/msvc/Hacl_Frodo976.c
index dbd9bc32..7915de1e 100644
--- a/src/msvc/Hacl_Frodo976.c
+++ b/src/msvc/Hacl_Frodo976.c
@@ -29,151 +29,113 @@
 #include "internal/Hacl_Frodo_KEM.h"
 #include "lib_memzero0.h"
 
-uint32_t Hacl_Frodo976_crypto_bytes = (uint32_t)24U;
+uint32_t Hacl_Frodo976_crypto_bytes = 24U;
 
-uint32_t Hacl_Frodo976_crypto_publickeybytes = (uint32_t)15632U;
+uint32_t Hacl_Frodo976_crypto_publickeybytes = 15632U;
 
-uint32_t Hacl_Frodo976_crypto_secretkeybytes = (uint32_t)31296U;
+uint32_t Hacl_Frodo976_crypto_secretkeybytes = 31296U;
 
-uint32_t Hacl_Frodo976_crypto_ciphertextbytes = (uint32_t)15744U;
+uint32_t Hacl_Frodo976_crypto_ciphertextbytes = 15744U;
 
 uint32_t Hacl_Frodo976_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[64U] = { 0U };
-  randombytes_((uint32_t)64U, coins);
+  randombytes_(64U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)24U;
-  uint8_t *z = coins + (uint32_t)48U;
+  uint8_t *seed_se = coins + 24U;
+  uint8_t *z = coins + 48U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake256_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)15656U;
+  Hacl_SHA3_shake256_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 15656U;
   uint16_t s_matrix[7808U] = { 0U };
   uint16_t e_matrix[7808U] = { 0U };
   uint8_t r[31232U] = { 0U };
   uint8_t shake_input_seed_se[25U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31232U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)976U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)976U,
-    (uint32_t)8U,
-    r + (uint32_t)15616U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(25U, shake_input_seed_se, 31232U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(976U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(976U, 8U, r + 15616U, e_matrix);
   uint16_t b_matrix[7808U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)952576U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 952576U);
   uint16_t a_matrix[952576U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)976U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)976U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)976U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)976U,
-    (uint32_t)8U,
-    (uint32_t)16U,
-    b_matrix,
-    b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)976U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)7808U, uint16_t);
-  uint32_t slen1 = (uint32_t)31272U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 976U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(976U, 976U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(976U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(976U, 8U, 16U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(976U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 7808U, uint16_t);
+  uint32_t slen1 = 31272U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)24U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)24U, pk, (uint32_t)15632U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)15632U, pk, (uint32_t)24U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)64U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 24U * sizeof (uint8_t));
+  memcpy(sk_p + 24U, pk, 15632U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(15632U, pk, 24U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 64U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo976_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[24U] = { 0U };
-  randombytes_((uint32_t)24U, coins);
+  randombytes_(24U, coins);
   uint8_t seed_se_k[48U] = { 0U };
   uint8_t pkh_mu[48U] = { 0U };
-  Hacl_SHA3_shake256_hacl((uint32_t)15632U, pk, (uint32_t)24U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)24U, coins, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)48U, pkh_mu, (uint32_t)48U, seed_se_k);
+  Hacl_SHA3_shake256_hacl(15632U, pk, 24U, pkh_mu);
+  memcpy(pkh_mu + 24U, coins, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(48U, pkh_mu, 48U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)24U;
+  uint8_t *k = seed_se_k + 24U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[7808U] = { 0U };
   uint16_t ep_matrix[7808U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[31360U] = { 0U };
   uint8_t shake_input_seed_se[25U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31360U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, (uint32_t)976U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)976U,
-    r + (uint32_t)15616U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)31232U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(25U, shake_input_seed_se, 31360U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r + 15616U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 8U, r + 31232U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)15616U;
+  uint8_t *c2 = ct + 15616U;
   uint16_t bp_matrix[7808U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)952576U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 952576U);
   uint16_t a_matrix[952576U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)976U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)976U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)976U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 976U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 976U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 976U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 976U, 16U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[7808U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)976U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(976U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)3U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)15768U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 3U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 16U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 15768U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *shake_input_ss = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)15744U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)15744U, k, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, (uint32_t)24U, ss);
+  memcpy(shake_input_ss, ct, 15744U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 15744U, k, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, 24U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)48U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)24U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t);
+  Lib_Memzero0_memzero(coins, 24U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -181,39 +143,30 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[7808U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)15616U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, c2, c_matrix);
+  uint8_t *c2 = ct + 15616U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 976U, 16U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 16U, c2, c_matrix);
   uint8_t mu_decode[24U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)15656U;
+  uint8_t *s_bytes = sk + 15656U;
   uint16_t s_matrix[7808U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)976U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)16U,
-    (uint32_t)3U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(976U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 976U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(16U, 3U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[48U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)48U;
+  uint32_t pkh_mu_decode_len = 48U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t *pkh_mu_decode = (uint8_t *)alloca(pkh_mu_decode_len * sizeof (uint8_t));
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)31272U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)24U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)24U, mu_decode, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)48U, seed_se_k);
+  uint8_t *pkh = sk + 31272U;
+  memcpy(pkh_mu_decode, pkh, 24U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 24U, mu_decode, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, 48U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)24U;
+  uint8_t *kp = seed_se_k + 24U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[7808U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -222,80 +175,58 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[31360U] = { 0U };
   uint8_t shake_input_seed_se[25U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31360U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, (uint32_t)976U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)976U,
-    r + (uint32_t)15616U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)31232U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)24U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(25U, shake_input_seed_se, 31360U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r + 15616U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 8U, r + 31232U, epp_matrix);
+  uint8_t *pk = sk + 24U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)952576U);
+  uint8_t *b = pk + 16U;
+  KRML_CHECK_SIZE(sizeof (uint16_t), 952576U);
   uint16_t a_matrix[952576U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)976U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)976U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)976U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 976U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 976U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 976U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[7808U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)976U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(976U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)3U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)976U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 3U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 976U, 16U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 16U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 976U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[24U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)24U; i++)
+  for (uint32_t i = 0U; i < 24U; i++)
   {
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;
   }
-  uint32_t ss_init_len = (uint32_t)15768U;
+  uint32_t ss_init_len = 15768U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *ss_init = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)15744U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)15744U, kp_s, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, (uint32_t)24U, ss);
+  memcpy(ss_init, ct, 15744U * sizeof (uint8_t));
+  memcpy(ss_init + 15744U, kp_s, 24U * sizeof (uint8_t));
+  Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, 24U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)24U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)48U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)24U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 24U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 24U, uint8_t);
+  return 0U;
 }
 
diff --git a/src/msvc/Hacl_Frodo_KEM.c b/src/msvc/Hacl_Frodo_KEM.c
index 4265ac0e..e0a65a47 100644
--- a/src/msvc/Hacl_Frodo_KEM.c
+++ b/src/msvc/Hacl_Frodo_KEM.c
@@ -30,6 +30,6 @@
 
 void randombytes_(uint32_t len, uint8_t *res)
 {
-  KRML_HOST_IGNORE(Lib_RandomBuffer_System_randombytes(res, len));
+  Lib_RandomBuffer_System_randombytes(res, len);
 }
 
diff --git a/src/msvc/Hacl_GenericField32.c b/src/msvc/Hacl_GenericField32.c
index 47ca15e8..750d56fc 100644
--- a/src/msvc/Hacl_GenericField32.c
+++ b/src/msvc/Hacl_GenericField32.c
@@ -56,7 +56,7 @@ Check whether this library will work for a modulus `n`.
 bool Hacl_GenericField32_field_modulus_check(uint32_t len, uint32_t *n)
 {
   uint32_t m = Hacl_Bignum_Montgomery_bn_check_modulus_u32(len, n);
-  return m == (uint32_t)0xFFFFFFFFU;
+  return m == 0xFFFFFFFFU;
 }
 
 /**
@@ -82,7 +82,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -283,27 +283,27 @@ Hacl_GenericField32_exp_consttime(
   uint32_t *aMc = (uint32_t *)alloca(k1.len * sizeof (uint32_t));
   memset(aMc, 0U, k1.len * sizeof (uint32_t));
   memcpy(aMc, aM, k1.len * sizeof (uint32_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t *ctx = (uint32_t *)alloca((len1 + len1) * sizeof (uint32_t));
     memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint32_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len1; i++)
+      for (uint32_t i = 0U; i < len1; i++)
       {
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aMc[i]);
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aMc[i]);
         resM[i] = resM[i] ^ dummy;
         aMc[i] = aMc[i] ^ dummy;
       }
@@ -314,9 +314,9 @@ Hacl_GenericField32_exp_consttime(
       sw = bit;
     }
     uint32_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len1; i++)
+    for (uint32_t i = 0U; i < len1; i++)
     {
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aMc[i]);
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aMc[i]);
       resM[i] = resM[i] ^ dummy;
       aMc[i] = aMc[i] ^ dummy;
     }
@@ -324,22 +324,22 @@ Hacl_GenericField32_exp_consttime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 32U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t *ctx = (uint32_t *)alloca((len1 + len1) * sizeof (uint32_t));
     memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint32_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len1);
-    uint32_t *table = (uint32_t *)alloca((uint32_t)16U * len1 * sizeof (uint32_t));
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint32_t));
+    KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len1);
+    uint32_t *table = (uint32_t *)alloca(16U * len1 * sizeof (uint32_t));
+    memset(table, 0U, 16U * len1 * sizeof (uint32_t));
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t *tmp = (uint32_t *)alloca(len1 * sizeof (uint32_t));
     memset(tmp, 0U, len1 * sizeof (uint32_t));
@@ -350,29 +350,29 @@ Hacl_GenericField32_exp_consttime(
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint32_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint32_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint32_t *t11 = table + (i + 1U) * len1;
       uint32_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint32_t));
-      uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint32_t));
+      uint32_t *t2 = table + (2U * i + 2U) * len1;
       uint32_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint32_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint32_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-      memcpy(resM, (uint32_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint32_t));
+      uint32_t i0 = bBits / 4U * 4U;
+      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+      memcpy(resM, (uint32_t *)(table + 0U * len1), len1 * sizeof (uint32_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-        const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+        const uint32_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint32_t *os = resM;
           uint32_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -388,24 +388,24 @@ Hacl_GenericField32_exp_consttime(
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t *tmp0 = (uint32_t *)alloca(len1 * sizeof (uint32_t));
     memset(tmp0, 0U, len1 * sizeof (uint32_t));
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
     {
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint32_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, (uint32_t)4U);
-      memcpy(tmp0, (uint32_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint32_t));
+      uint32_t k2 = bBits - bBits % 4U - 4U * i0 - 4U;
+      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, 4U);
+      memcpy(tmp0, (uint32_t *)(table + 0U * len1), len1 * sizeof (uint32_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-        const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+        const uint32_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint32_t *os = tmp0;
           uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -450,7 +450,7 @@ Hacl_GenericField32_exp_vartime(
   uint32_t *aMc = (uint32_t *)alloca(k1.len * sizeof (uint32_t));
   memset(aMc, 0U, k1.len * sizeof (uint32_t));
   memcpy(aMc, aM, k1.len * sizeof (uint32_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t *ctx = (uint32_t *)alloca((len1 + len1) * sizeof (uint32_t));
@@ -460,13 +460,13 @@ Hacl_GenericField32_exp_vartime(
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n0, k1.mu, resM, aMc, resM);
@@ -478,22 +478,22 @@ Hacl_GenericField32_exp_vartime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 32U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t *ctx = (uint32_t *)alloca((len1 + len1) * sizeof (uint32_t));
     memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint32_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len1);
-    uint32_t *table = (uint32_t *)alloca((uint32_t)16U * len1 * sizeof (uint32_t));
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint32_t));
+    KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len1);
+    uint32_t *table = (uint32_t *)alloca(16U * len1 * sizeof (uint32_t));
+    memset(table, 0U, 16U * len1 * sizeof (uint32_t));
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t *tmp = (uint32_t *)alloca(len1 * sizeof (uint32_t));
     memset(tmp, 0U, len1 * sizeof (uint32_t));
@@ -504,21 +504,21 @@ Hacl_GenericField32_exp_vartime(
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint32_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint32_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint32_t *t11 = table + (i + 1U) * len1;
       uint32_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint32_t));
-      uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint32_t));
+      uint32_t *t2 = table + (2U * i + 2U) * len1;
       uint32_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint32_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint32_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+      uint32_t i = bBits / 4U * 4U;
+      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
       uint32_t bits_l32 = bits_c;
       const uint32_t *a_bits_l = table + bits_l32 * len1;
       memcpy(resM, (uint32_t *)a_bits_l, len1 * sizeof (uint32_t));
@@ -532,16 +532,16 @@ Hacl_GenericField32_exp_vartime(
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t *tmp0 = (uint32_t *)alloca(len1 * sizeof (uint32_t));
     memset(tmp0, 0U, len1 * sizeof (uint32_t));
-    for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < bBits / 4U; i++)
     {
       KRML_MAYBE_FOR4(i0,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint32_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, (uint32_t)4U);
+      uint32_t k2 = bBits - bBits % 4U - 4U * i - 4U;
+      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, 4U);
       uint32_t bits_l32 = bits_l;
       const uint32_t *a_bits_l = table + bits_l32 * len1;
       memcpy(tmp0, (uint32_t *)a_bits_l, len1 * sizeof (uint32_t));
@@ -574,38 +574,33 @@ Hacl_GenericField32_inverse(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t *n2 = (uint32_t *)alloca(len1 * sizeof (uint32_t));
   memset(n2, 0U, len1 * sizeof (uint32_t));
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
   uint32_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint32_t *a1 = k1.n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t *a1 = k1.n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i);
     }
     uint32_t c10 = c;
     c1 = c10;
@@ -614,7 +609,7 @@ Hacl_GenericField32_inverse(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
-  Hacl_GenericField32_exp_vartime(k, aM, k1.len * (uint32_t)32U, n2, aInvM);
+  KRML_MAYBE_UNUSED_VAR(c1);
+  Hacl_GenericField32_exp_vartime(k, aM, k1.len * 32U, n2, aInvM);
 }
 
diff --git a/src/msvc/Hacl_GenericField64.c b/src/msvc/Hacl_GenericField64.c
index e8084285..04f54288 100644
--- a/src/msvc/Hacl_GenericField64.c
+++ b/src/msvc/Hacl_GenericField64.c
@@ -55,7 +55,7 @@ Check whether this library will work for a modulus `n`.
 bool Hacl_GenericField64_field_modulus_check(uint32_t len, uint64_t *n)
 {
   uint64_t m = Hacl_Bignum_Montgomery_bn_check_modulus_u64(len, n);
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -81,7 +81,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint64_t));
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -282,27 +282,27 @@ Hacl_GenericField64_exp_consttime(
   uint64_t *aMc = (uint64_t *)alloca(k1.len * sizeof (uint64_t));
   memset(aMc, 0U, k1.len * sizeof (uint64_t));
   memcpy(aMc, aM, k1.len * sizeof (uint64_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t *ctx = (uint64_t *)alloca((len1 + len1) * sizeof (uint64_t));
     memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint64_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len1; i++)
+      for (uint32_t i = 0U; i < len1; i++)
       {
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aMc[i]);
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aMc[i]);
         resM[i] = resM[i] ^ dummy;
         aMc[i] = aMc[i] ^ dummy;
       }
@@ -313,9 +313,9 @@ Hacl_GenericField64_exp_consttime(
       sw = bit;
     }
     uint64_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len1; i++)
+    for (uint32_t i = 0U; i < len1; i++)
     {
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aMc[i]);
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aMc[i]);
       resM[i] = resM[i] ^ dummy;
       aMc[i] = aMc[i] ^ dummy;
     }
@@ -323,22 +323,22 @@ Hacl_GenericField64_exp_consttime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 64U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t *ctx = (uint64_t *)alloca((len1 + len1) * sizeof (uint64_t));
     memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint64_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len1);
-    uint64_t *table = (uint64_t *)alloca((uint32_t)16U * len1 * sizeof (uint64_t));
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint64_t));
+    KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len1);
+    uint64_t *table = (uint64_t *)alloca(16U * len1 * sizeof (uint64_t));
+    memset(table, 0U, 16U * len1 * sizeof (uint64_t));
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t *tmp = (uint64_t *)alloca(len1 * sizeof (uint64_t));
     memset(tmp, 0U, len1 * sizeof (uint64_t));
@@ -349,29 +349,29 @@ Hacl_GenericField64_exp_consttime(
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint64_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint64_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint64_t *t11 = table + (i + 1U) * len1;
       uint64_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint64_t));
-      uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint64_t));
+      uint64_t *t2 = table + (2U * i + 2U) * len1;
       uint64_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint64_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint64_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-      memcpy(resM, (uint64_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint64_t));
+      uint32_t i0 = bBits / 4U * 4U;
+      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+      memcpy(resM, (uint64_t *)(table + 0U * len1), len1 * sizeof (uint64_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-        const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+        const uint64_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint64_t *os = resM;
           uint64_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -387,24 +387,24 @@ Hacl_GenericField64_exp_consttime(
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t *tmp0 = (uint64_t *)alloca(len1 * sizeof (uint64_t));
     memset(tmp0, 0U, len1 * sizeof (uint64_t));
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
     {
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, (uint32_t)4U);
-      memcpy(tmp0, (uint64_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint64_t));
+      uint32_t k2 = bBits - bBits % 4U - 4U * i0 - 4U;
+      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, 4U);
+      memcpy(tmp0, (uint64_t *)(table + 0U * len1), len1 * sizeof (uint64_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-        const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+        const uint64_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint64_t *os = tmp0;
           uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -449,7 +449,7 @@ Hacl_GenericField64_exp_vartime(
   uint64_t *aMc = (uint64_t *)alloca(k1.len * sizeof (uint64_t));
   memset(aMc, 0U, k1.len * sizeof (uint64_t));
   memcpy(aMc, aM, k1.len * sizeof (uint64_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t *ctx = (uint64_t *)alloca((len1 + len1) * sizeof (uint64_t));
@@ -459,13 +459,13 @@ Hacl_GenericField64_exp_vartime(
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n0, k1.mu, resM, aMc, resM);
@@ -477,22 +477,22 @@ Hacl_GenericField64_exp_vartime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 64U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t *ctx = (uint64_t *)alloca((len1 + len1) * sizeof (uint64_t));
     memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint64_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len1);
-    uint64_t *table = (uint64_t *)alloca((uint32_t)16U * len1 * sizeof (uint64_t));
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint64_t));
+    KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len1);
+    uint64_t *table = (uint64_t *)alloca(16U * len1 * sizeof (uint64_t));
+    memset(table, 0U, 16U * len1 * sizeof (uint64_t));
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t *tmp = (uint64_t *)alloca(len1 * sizeof (uint64_t));
     memset(tmp, 0U, len1 * sizeof (uint64_t));
@@ -503,21 +503,21 @@ Hacl_GenericField64_exp_vartime(
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint64_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint64_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint64_t *t11 = table + (i + 1U) * len1;
       uint64_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint64_t));
-      uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint64_t));
+      uint64_t *t2 = table + (2U * i + 2U) * len1;
       uint64_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint64_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint64_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+      uint32_t i = bBits / 4U * 4U;
+      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
       uint32_t bits_l32 = (uint32_t)bits_c;
       const uint64_t *a_bits_l = table + bits_l32 * len1;
       memcpy(resM, (uint64_t *)a_bits_l, len1 * sizeof (uint64_t));
@@ -531,16 +531,16 @@ Hacl_GenericField64_exp_vartime(
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t *tmp0 = (uint64_t *)alloca(len1 * sizeof (uint64_t));
     memset(tmp0, 0U, len1 * sizeof (uint64_t));
-    for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < bBits / 4U; i++)
     {
       KRML_MAYBE_FOR4(i0,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, (uint32_t)4U);
+      uint32_t k2 = bBits - bBits % 4U - 4U * i - 4U;
+      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, 4U);
       uint32_t bits_l32 = (uint32_t)bits_l;
       const uint64_t *a_bits_l = table + bits_l32 * len1;
       memcpy(tmp0, (uint64_t *)a_bits_l, len1 * sizeof (uint64_t));
@@ -573,38 +573,33 @@ Hacl_GenericField64_inverse(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t *n2 = (uint64_t *)alloca(len1 * sizeof (uint64_t));
   memset(n2, 0U, len1 * sizeof (uint64_t));
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
   uint64_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint64_t *a1 = k1.n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t *a1 = k1.n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c10 = c;
     c1 = c10;
@@ -613,7 +608,7 @@ Hacl_GenericField64_inverse(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
-  Hacl_GenericField64_exp_vartime(k, aM, k1.len * (uint32_t)64U, n2, aInvM);
+  KRML_MAYBE_UNUSED_VAR(c1);
+  Hacl_GenericField64_exp_vartime(k, aM, k1.len * 64U, n2, aInvM);
 }
 
diff --git a/src/msvc/Hacl_HKDF.c b/src/msvc/Hacl_HKDF.c
index ce57b82c..f3b4d90f 100644
--- a/src/msvc/Hacl_HKDF.c
+++ b/src/msvc/Hacl_HKDF.c
@@ -45,39 +45,39 @@ Hacl_HKDF_expand_sha2_256(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -125,39 +125,39 @@ Hacl_HKDF_expand_sha2_384(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)48U;
+  uint32_t tlen = 48U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -205,39 +205,39 @@ Hacl_HKDF_expand_sha2_512(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -285,39 +285,39 @@ Hacl_HKDF_expand_blake2s_32(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -365,39 +365,39 @@ Hacl_HKDF_expand_blake2b_32(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/msvc/Hacl_HKDF_Blake2b_256.c b/src/msvc/Hacl_HKDF_Blake2b_256.c
index 22b5549b..3280cb8f 100644
--- a/src/msvc/Hacl_HKDF_Blake2b_256.c
+++ b/src/msvc/Hacl_HKDF_Blake2b_256.c
@@ -45,47 +45,39 @@ Hacl_HKDF_Blake2b_256_expand_blake2b_256(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/msvc/Hacl_HKDF_Blake2s_128.c b/src/msvc/Hacl_HKDF_Blake2s_128.c
index 24d6cb3d..7007a4eb 100644
--- a/src/msvc/Hacl_HKDF_Blake2s_128.c
+++ b/src/msvc/Hacl_HKDF_Blake2s_128.c
@@ -45,47 +45,39 @@ Hacl_HKDF_Blake2s_128_expand_blake2s_128(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/msvc/Hacl_HMAC.c b/src/msvc/Hacl_HMAC.c
index d46c4812..ca7f255f 100644
--- a/src/msvc/Hacl_HMAC.c
+++ b/src/msvc/Hacl_HMAC.c
@@ -45,23 +45,23 @@ Hacl_HMAC_legacy_compute_sha1(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)20U;
+    ite = 20U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -71,42 +71,37 @@ Hacl_HMAC_legacy_compute_sha1(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
-  uint32_t
-  s[5U] =
-    {
-      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-      (uint32_t)0xc3d2e1f0U
-    };
+  uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)0U, ipad, (uint32_t)64U);
+    Hacl_Hash_SHA1_legacy_update_last(s, 0ULL, ipad, 64U);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -118,25 +113,21 @@ Hacl_HMAC_legacy_compute_sha1(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_Hash_SHA1_legacy_update_multi(s, ipad, (uint32_t)1U);
+    Hacl_Hash_SHA1_legacy_update_multi(s, ipad, 1U);
     Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-    Hacl_Hash_SHA1_legacy_update_last(s,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-      rem,
-      rem_len);
+    Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
   }
   Hacl_Hash_Core_SHA1_legacy_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_Hash_Core_SHA1_legacy_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)20U / block_len;
-  uint32_t rem0 = (uint32_t)20U % block_len;
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 20U / block_len;
+  uint32_t rem0 = 20U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)20U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 20U - n_blocks_ * block_len });
   }
   else
   {
@@ -147,12 +138,9 @@ Hacl_HMAC_legacy_compute_sha1(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_Hash_SHA1_legacy_update_multi(s, opad, (uint32_t)1U);
+  Hacl_Hash_SHA1_legacy_update_multi(s, opad, 1U);
   Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-  Hacl_Hash_SHA1_legacy_update_last(s,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-    rem,
-    rem_len);
+  Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
   Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
 }
 
@@ -171,23 +159,23 @@ Hacl_HMAC_compute_sha2_256(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -197,48 +185,45 @@ Hacl_HMAC_compute_sha2_256(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
     uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
     os[i] = x;);
   uint32_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)0U + (uint64_t)(uint32_t)64U,
-      (uint32_t)64U,
-      ipad,
-      s);
+    Hacl_SHA2_Scalar32_sha256_update_last(0ULL + (uint64_t)64U, 64U, ipad, s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -250,9 +235,9 @@ Hacl_HMAC_compute_sha2_256(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, ipad, s);
-    Hacl_SHA2_Scalar32_sha256_update_nblocks(n_blocks * (uint32_t)64U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
+    Hacl_SHA2_Scalar32_sha256_update_nblocks(64U, ipad, s);
+    Hacl_SHA2_Scalar32_sha256_update_nblocks(n_blocks * 64U, full_blocks, s);
+    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)64U
       + (uint64_t)full_blocks_len
       + (uint64_t)rem_len,
       rem_len,
@@ -262,15 +247,14 @@ Hacl_HMAC_compute_sha2_256(
   Hacl_SHA2_Scalar32_sha256_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_SHA2_Scalar32_sha256_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -281,9 +265,9 @@ Hacl_HMAC_compute_sha2_256(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, opad, s);
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(n_blocks * (uint32_t)64U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
+  Hacl_SHA2_Scalar32_sha256_update_nblocks(64U, opad, s);
+  Hacl_SHA2_Scalar32_sha256_update_nblocks(n_blocks * 64U, full_blocks, s);
+  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)64U
     + (uint64_t)full_blocks_len
     + (uint64_t)rem_len,
     rem_len,
@@ -307,23 +291,23 @@ Hacl_HMAC_compute_sha2_384(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)48U;
+    ite = 48U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -333,49 +317,49 @@ Hacl_HMAC_compute_sha2_384(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
     uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -387,9 +371,9 @@ Hacl_HMAC_compute_sha2_384(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_SHA2_Scalar32_sha384_update_nblocks(128U, ipad, s);
+    Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
@@ -399,15 +383,14 @@ Hacl_HMAC_compute_sha2_384(
   Hacl_SHA2_Scalar32_sha384_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_SHA2_Scalar32_sha384_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)48U / block_len;
-  uint32_t rem0 = (uint32_t)48U % block_len;
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 48U / block_len;
+  uint32_t rem0 = 48U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)48U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 48U - n_blocks_ * block_len });
   }
   else
   {
@@ -418,9 +401,9 @@ Hacl_HMAC_compute_sha2_384(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_SHA2_Scalar32_sha384_update_nblocks(128U, opad, s);
+  Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
@@ -444,23 +427,23 @@ Hacl_HMAC_compute_sha2_512(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
@@ -470,49 +453,49 @@ Hacl_HMAC_compute_sha2_512(
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
     uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -524,9 +507,9 @@ Hacl_HMAC_compute_sha2_512(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(128U, ipad, s);
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
@@ -536,15 +519,14 @@ Hacl_HMAC_compute_sha2_512(
   Hacl_SHA2_Scalar32_sha512_finish(s, dst1);
   uint8_t *hash1 = ipad;
   Hacl_SHA2_Scalar32_sha512_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -555,9 +537,9 @@ Hacl_HMAC_compute_sha2_512(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_SHA2_Scalar32_sha512_update_nblocks(128U, opad, s);
+  Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
@@ -581,66 +563,66 @@ Hacl_HMAC_compute_blake2s_32(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2s_32_blake2s((uint32_t)32U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Blake2s_32_blake2s(32U, nkey, key_len, key, 0U, NULL);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t s[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_init(s, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_32_blake2s_init(s, 0U, 32U);
   uint32_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last((uint32_t)64U, wv, s0, (uint64_t)0U, (uint32_t)64U, ipad);
+    Hacl_Blake2s_32_blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -653,9 +635,9 @@ Hacl_HMAC_compute_blake2s_32(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, ipad, (uint32_t)1U);
+    Hacl_Blake2s_32_blake2s_update_multi(64U, wv, s0, 0ULL, ipad, 1U);
     uint32_t wv0[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+    Hacl_Blake2s_32_blake2s_update_multi(n_blocks * 64U,
       wv0,
       s0,
       (uint64_t)block_len,
@@ -665,22 +647,21 @@ Hacl_HMAC_compute_blake2s_32(
     Hacl_Blake2s_32_blake2s_update_last(rem_len,
       wv1,
       s0,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+      (uint64_t)64U + (uint64_t)full_blocks_len,
       rem_len,
       rem);
   }
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst1, s0);
+  Hacl_Blake2s_32_blake2s_finish(32U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2s_32_blake2s_init(s0, (uint32_t)0U, (uint32_t)32U);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Blake2s_32_blake2s_init(s0, 0U, 32U);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -692,9 +673,9 @@ Hacl_HMAC_compute_blake2s_32(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint32_t wv[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, opad, (uint32_t)1U);
+  Hacl_Blake2s_32_blake2s_update_multi(64U, wv, s0, 0ULL, opad, 1U);
   uint32_t wv0[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+  Hacl_Blake2s_32_blake2s_update_multi(n_blocks * 64U,
     wv0,
     s0,
     (uint64_t)block_len,
@@ -704,10 +685,10 @@ Hacl_HMAC_compute_blake2s_32(
   Hacl_Blake2s_32_blake2s_update_last(rem_len,
     wv1,
     s0,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+    (uint64_t)64U + (uint64_t)full_blocks_len,
     rem_len,
     rem);
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, s0);
+  Hacl_Blake2s_32_blake2s_finish(32U, dst, s0);
 }
 
 /**
@@ -725,71 +706,71 @@ Hacl_HMAC_compute_blake2b_32(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2b_32_blake2b((uint32_t)64U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Blake2b_32_blake2b(64U, nkey, key_len, key, 0U, NULL);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t s[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_init(s, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_32_blake2b_init(s, 0U, 64U);
   uint64_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last((uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_last(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      (uint32_t)128U,
+      FStar_UInt128_uint64_to_uint128(0ULL),
+      128U,
       ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -802,14 +783,14 @@ Hacl_HMAC_compute_blake2b_32(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_multi(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+      FStar_UInt128_uint64_to_uint128(0ULL),
       ipad,
-      (uint32_t)1U);
+      1U);
     uint64_t wv0[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+    Hacl_Blake2b_32_blake2b_update_multi(n_blocks * 128U,
       wv0,
       s0,
       FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
@@ -819,23 +800,22 @@ Hacl_HMAC_compute_blake2b_32(
     Hacl_Blake2b_32_blake2b_update_last(rem_len,
       wv1,
       s0,
-      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       rem_len,
       rem);
   }
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst1, s0);
+  Hacl_Blake2b_32_blake2b_finish(64U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2b_32_blake2b_init(s0, (uint32_t)0U, (uint32_t)64U);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Blake2b_32_blake2b_init(s0, 0U, 64U);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -847,14 +827,14 @@ Hacl_HMAC_compute_blake2b_32(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint64_t wv[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
+  Hacl_Blake2b_32_blake2b_update_multi(128U,
     wv,
     s0,
-    FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+    FStar_UInt128_uint64_to_uint128(0ULL),
     opad,
-    (uint32_t)1U);
+    1U);
   uint64_t wv0[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+  Hacl_Blake2b_32_blake2b_update_multi(n_blocks * 128U,
     wv0,
     s0,
     FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
@@ -864,10 +844,10 @@ Hacl_HMAC_compute_blake2b_32(
   Hacl_Blake2b_32_blake2b_update_last(rem_len,
     wv1,
     s0,
-    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
       FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
     rem_len,
     rem);
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, s0);
+  Hacl_Blake2b_32_blake2b_finish(64U, dst, s0);
 }
 
diff --git a/src/msvc/Hacl_HMAC_Blake2b_256.c b/src/msvc/Hacl_HMAC_Blake2b_256.c
index 20b050de..aa5985e9 100644
--- a/src/msvc/Hacl_HMAC_Blake2b_256.c
+++ b/src/msvc/Hacl_HMAC_Blake2b_256.c
@@ -43,71 +43,71 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2b_256_blake2b((uint32_t)64U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Blake2b_256_blake2b(64U, nkey, key_len, key, 0U, NULL);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_init(s, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_256_blake2b_init(s, 0U, 64U);
   Lib_IntVector_Intrinsics_vec256 *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_last((uint32_t)128U,
+    Hacl_Blake2b_256_blake2b_update_last(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      (uint32_t)128U,
+      FStar_UInt128_uint64_to_uint128(0ULL),
+      128U,
       ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -120,14 +120,14 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
+    Hacl_Blake2b_256_blake2b_update_multi(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+      FStar_UInt128_uint64_to_uint128(0ULL),
       ipad,
-      (uint32_t)1U);
+      1U);
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_multi(n_blocks * (uint32_t)128U,
+    Hacl_Blake2b_256_blake2b_update_multi(n_blocks * 128U,
       wv0,
       s0,
       FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
@@ -137,23 +137,22 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
     Hacl_Blake2b_256_blake2b_update_last(rem_len,
       wv1,
       s0,
-      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       rem_len,
       rem);
   }
-  Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst1, s0);
+  Hacl_Blake2b_256_blake2b_finish(64U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2b_256_blake2b_init(s0, (uint32_t)0U, (uint32_t)64U);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Blake2b_256_blake2b_init(s0, 0U, 64U);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -165,14 +164,14 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
+  Hacl_Blake2b_256_blake2b_update_multi(128U,
     wv,
     s0,
-    FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+    FStar_UInt128_uint64_to_uint128(0ULL),
     opad,
-    (uint32_t)1U);
+    1U);
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_update_multi(n_blocks * (uint32_t)128U,
+  Hacl_Blake2b_256_blake2b_update_multi(n_blocks * 128U,
     wv0,
     s0,
     FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
@@ -182,10 +181,10 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
   Hacl_Blake2b_256_blake2b_update_last(rem_len,
     wv1,
     s0,
-    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
       FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
     rem_len,
     rem);
-  Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, s0);
+  Hacl_Blake2b_256_blake2b_finish(64U, dst, s0);
 }
 
diff --git a/src/msvc/Hacl_HMAC_Blake2s_128.c b/src/msvc/Hacl_HMAC_Blake2s_128.c
index 144722e4..06611b11 100644
--- a/src/msvc/Hacl_HMAC_Blake2s_128.c
+++ b/src/msvc/Hacl_HMAC_Blake2s_128.c
@@ -42,66 +42,66 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2s_128_blake2s((uint32_t)32U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Blake2s_128_blake2s(32U, nkey, key_len, key, 0U, NULL);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 s[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_init(s, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_128_blake2s_init(s, 0U, 32U);
   Lib_IntVector_Intrinsics_vec128 *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_last((uint32_t)64U, wv, s0, (uint64_t)0U, (uint32_t)64U, ipad);
+    Hacl_Blake2s_128_blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -114,9 +114,9 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, ipad, (uint32_t)1U);
+    Hacl_Blake2s_128_blake2s_update_multi(64U, wv, s0, 0ULL, ipad, 1U);
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_multi(n_blocks * (uint32_t)64U,
+    Hacl_Blake2s_128_blake2s_update_multi(n_blocks * 64U,
       wv0,
       s0,
       (uint64_t)block_len,
@@ -126,22 +126,21 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
     Hacl_Blake2s_128_blake2s_update_last(rem_len,
       wv1,
       s0,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+      (uint64_t)64U + (uint64_t)full_blocks_len,
       rem_len,
       rem);
   }
-  Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst1, s0);
+  Hacl_Blake2s_128_blake2s_finish(32U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2s_128_blake2s_init(s0, (uint32_t)0U, (uint32_t)32U);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Blake2s_128_blake2s_init(s0, 0U, 32U);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -153,9 +152,9 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, opad, (uint32_t)1U);
+  Hacl_Blake2s_128_blake2s_update_multi(64U, wv, s0, 0ULL, opad, 1U);
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_update_multi(n_blocks * (uint32_t)64U,
+  Hacl_Blake2s_128_blake2s_update_multi(n_blocks * 64U,
     wv0,
     s0,
     (uint64_t)block_len,
@@ -165,9 +164,9 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
   Hacl_Blake2s_128_blake2s_update_last(rem_len,
     wv1,
     s0,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+    (uint64_t)64U + (uint64_t)full_blocks_len,
     rem_len,
     rem);
-  Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, s0);
+  Hacl_Blake2s_128_blake2s_finish(32U, dst, s0);
 }
 
diff --git a/src/msvc/Hacl_HMAC_DRBG.c b/src/msvc/Hacl_HMAC_DRBG.c
index b3acf354..0f2fa923 100644
--- a/src/msvc/Hacl_HMAC_DRBG.c
+++ b/src/msvc/Hacl_HMAC_DRBG.c
@@ -25,15 +25,15 @@
 
 #include "Hacl_HMAC_DRBG.h"
 
-uint32_t Hacl_HMAC_DRBG_reseed_interval = (uint32_t)1024U;
+uint32_t Hacl_HMAC_DRBG_reseed_interval = 1024U;
 
-uint32_t Hacl_HMAC_DRBG_max_output_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_output_length = 65536U;
 
-uint32_t Hacl_HMAC_DRBG_max_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_length = 65536U;
 
-uint32_t Hacl_HMAC_DRBG_max_personalization_string_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_personalization_string_length = 65536U;
 
-uint32_t Hacl_HMAC_DRBG_max_additional_input_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_additional_input_length = 65536U;
 
 /**
 Return the minimal entropy input length of the desired hash function.
@@ -46,19 +46,19 @@ uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     default:
       {
@@ -71,8 +71,8 @@ uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
 bool
 Hacl_HMAC_DRBG_uu___is_State(Spec_Hash_Definitions_hash_alg a, Hacl_HMAC_DRBG_state projectee)
 {
-  KRML_HOST_IGNORE(a);
-  KRML_HOST_IGNORE(projectee);
+  KRML_MAYBE_UNUSED_VAR(a);
+  KRML_MAYBE_UNUSED_VAR(projectee);
   return true;
 }
 
@@ -92,25 +92,25 @@ Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
         k = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
         k = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
         k = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
         k = buf;
         break;
       }
@@ -125,25 +125,25 @@ Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
         v = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
         v = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
         v = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
         v = buf;
         break;
       }
@@ -154,7 +154,7 @@ Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a)
       }
   }
   uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return ((Hacl_HMAC_DRBG_state){ .k = k, .v = v, .reseed_counter = ctr });
 }
 
@@ -203,45 +203,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)20U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)20U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 20U * sizeof (uint8_t));
+        memset(v, 1U, 20U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 21U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 20U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)21U,
+          memcpy(input0 + 21U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[20U] = (uint8_t)0U;
-        Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-        Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[20U] = 0U;
+        Hacl_HMAC_legacy_compute_sha1(k_, k, 20U, input0, input_len);
+        Hacl_HMAC_legacy_compute_sha1(v, k_, 20U, v, 20U);
+        memcpy(k, k_, 20U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 21U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 20U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)21U,
+            memcpy(input + 21U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[20U] = (uint8_t)1U;
-          Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-          Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+          input[20U] = 1U;
+          Hacl_HMAC_legacy_compute_sha1(k_0, k, 20U, input, input_len0);
+          Hacl_HMAC_legacy_compute_sha1(v, k_0, 20U, v, 20U);
+          memcpy(k, k_0, 20U * sizeof (uint8_t));
         }
         break;
       }
@@ -264,45 +262,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)32U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)32U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 32U * sizeof (uint8_t));
+        memset(v, 1U, 32U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 33U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 32U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)33U,
+          memcpy(input0 + 33U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[32U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-        Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[32U] = 0U;
+        Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+        Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+        memcpy(k, k_, 32U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 33U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 32U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)33U,
+            memcpy(input + 33U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[32U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-          Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+          input[32U] = 1U;
+          Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+          Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+          memcpy(k, k_0, 32U * sizeof (uint8_t));
         }
         break;
       }
@@ -325,45 +321,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)48U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)48U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 48U * sizeof (uint8_t));
+        memset(v, 1U, 48U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 49U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 48U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)49U,
+          memcpy(input0 + 49U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[48U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-        Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[48U] = 0U;
+        Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+        Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+        memcpy(k, k_, 48U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 49U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 48U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)49U,
+            memcpy(input + 49U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[48U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-          Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+          input[48U] = 1U;
+          Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+          Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+          memcpy(k, k_0, 48U * sizeof (uint8_t));
         }
         break;
       }
@@ -386,45 +380,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)64U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)64U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 64U * sizeof (uint8_t));
+        memset(v, 1U, 64U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 65U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 64U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)65U,
+          memcpy(input0 + 65U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[64U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-        Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[64U] = 0U;
+        Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+        Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+        memcpy(k, k_, 64U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 65U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 64U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)65U,
+            memcpy(input + 65U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[64U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-          Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+          input[64U] = 1U;
+          Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+          Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+          memcpy(k, k_0, 64U * sizeof (uint8_t));
         }
         break;
       }
@@ -474,42 +466,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 21U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 20U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)21U,
+          memcpy(input0 + 21U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[20U] = (uint8_t)0U;
-        Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-        Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[20U] = 0U;
+        Hacl_HMAC_legacy_compute_sha1(k_, k, 20U, input0, input_len);
+        Hacl_HMAC_legacy_compute_sha1(v, k_, 20U, v, 20U);
+        memcpy(k, k_, 20U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)21U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 21U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 20U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)21U,
+            memcpy(input + 21U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[20U] = (uint8_t)1U;
-          Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-          Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+          input[20U] = 1U;
+          Hacl_HMAC_legacy_compute_sha1(k_0, k, 20U, input, input_len0);
+          Hacl_HMAC_legacy_compute_sha1(v, k_0, 20U, v, 20U);
+          memcpy(k, k_0, 20U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
@@ -528,42 +520,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 33U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 32U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)33U,
+          memcpy(input0 + 33U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[32U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-        Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[32U] = 0U;
+        Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+        Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+        memcpy(k, k_, 32U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)33U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 33U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 32U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)33U,
+            memcpy(input + 33U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[32U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-          Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+          input[32U] = 1U;
+          Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+          Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+          memcpy(k, k_0, 32U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
@@ -582,42 +574,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 49U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 48U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)49U,
+          memcpy(input0 + 49U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[48U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-        Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[48U] = 0U;
+        Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+        Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+        memcpy(k, k_, 48U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)49U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 49U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 48U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)49U,
+            memcpy(input + 49U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[48U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-          Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+          input[48U] = 1U;
+          Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+          Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+          memcpy(k, k_0, 48U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
@@ -636,42 +628,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 65U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 64U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)65U,
+          memcpy(input0 + 65U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[64U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-        Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[64U] = 0U;
+        Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+        Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+        memcpy(k, k_, 64U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)65U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 65U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 64U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)65U,
+            memcpy(input + 65U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[64U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-          Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+          input[64U] = 1U;
+          Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+          Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+          memcpy(k, k_0, 64U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     default:
@@ -713,93 +705,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)21U + additional_input_len;
+          uint32_t input_len = 21U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 20U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)21U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[20U] = (uint8_t)0U;
-          Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-          Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[20U] = 0U;
+          Hacl_HMAC_legacy_compute_sha1(k_, k, 20U, input0, input_len);
+          Hacl_HMAC_legacy_compute_sha1(v, k_, 20U, v, 20U);
+          memcpy(k, k_, 20U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+            uint32_t input_len0 = 21U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 20U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)21U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[20U] = (uint8_t)1U;
-            Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-            Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-            memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+            input[20U] = 1U;
+            Hacl_HMAC_legacy_compute_sha1(k_0, k, 20U, input, input_len0);
+            Hacl_HMAC_legacy_compute_sha1(v, k_0, 20U, v, 20U);
+            memcpy(k, k_0, 20U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)20U;
+        uint32_t max = n / 20U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_legacy_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(out + i * (uint32_t)20U, v, (uint32_t)20U * sizeof (uint8_t));
+          Hacl_HMAC_legacy_compute_sha1(v, k, 20U, v, 20U);
+          memcpy(out + i * 20U, v, 20U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)20U < n)
+        if (max * 20U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)20U;
-          Hacl_HMAC_legacy_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(block, v, (n - max * (uint32_t)20U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 20U;
+          Hacl_HMAC_legacy_compute_sha1(v, k, 20U, v, 20U);
+          memcpy(block, v, (n - max * 20U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)21U + additional_input_len;
+        uint32_t input_len = 21U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 20U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[20U] = (uint8_t)0U;
-        Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-        Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[20U] = 0U;
+        Hacl_HMAC_legacy_compute_sha1(k_, k, 20U, input0, input_len);
+        Hacl_HMAC_legacy_compute_sha1(v, k_, 20U, v, 20U);
+        memcpy(k, k_, 20U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+          uint32_t input_len0 = 21U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 20U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)21U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[20U] = (uint8_t)1U;
-          Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-          Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+          input[20U] = 1U;
+          Hacl_HMAC_legacy_compute_sha1(k_0, k, 20U, input, input_len0);
+          Hacl_HMAC_legacy_compute_sha1(v, k_0, 20U, v, 20U);
+          memcpy(k, k_0, 20U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     case Spec_Hash_Definitions_SHA2_256:
@@ -811,93 +797,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)33U + additional_input_len;
+          uint32_t input_len = 33U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 32U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)33U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[32U] = (uint8_t)0U;
-          Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-          Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[32U] = 0U;
+          Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+          Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+          memcpy(k, k_, 32U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+            uint32_t input_len0 = 33U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 32U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)33U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[32U] = (uint8_t)1U;
-            Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-            Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-            memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+            input[32U] = 1U;
+            Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+            Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+            memcpy(k, k_0, 32U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)32U;
+        uint32_t max = n / 32U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(out + i * (uint32_t)32U, v, (uint32_t)32U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+          memcpy(out + i * 32U, v, 32U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)32U < n)
+        if (max * 32U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)32U;
-          Hacl_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(block, v, (n - max * (uint32_t)32U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 32U;
+          Hacl_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+          memcpy(block, v, (n - max * 32U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)33U + additional_input_len;
+        uint32_t input_len = 33U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 32U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[32U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-        Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[32U] = 0U;
+        Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+        Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+        memcpy(k, k_, 32U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+          uint32_t input_len0 = 33U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 32U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)33U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[32U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-          Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+          input[32U] = 1U;
+          Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+          Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+          memcpy(k, k_0, 32U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     case Spec_Hash_Definitions_SHA2_384:
@@ -909,93 +889,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)49U + additional_input_len;
+          uint32_t input_len = 49U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 48U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)49U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[48U] = (uint8_t)0U;
-          Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-          Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[48U] = 0U;
+          Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+          Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+          memcpy(k, k_, 48U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+            uint32_t input_len0 = 49U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 48U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)49U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[48U] = (uint8_t)1U;
-            Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-            Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-            memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+            input[48U] = 1U;
+            Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+            Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+            memcpy(k, k_0, 48U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)48U;
+        uint32_t max = n / 48U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(out + i * (uint32_t)48U, v, (uint32_t)48U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+          memcpy(out + i * 48U, v, 48U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)48U < n)
+        if (max * 48U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)48U;
-          Hacl_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(block, v, (n - max * (uint32_t)48U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 48U;
+          Hacl_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+          memcpy(block, v, (n - max * 48U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)49U + additional_input_len;
+        uint32_t input_len = 49U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 48U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[48U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-        Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[48U] = 0U;
+        Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+        Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+        memcpy(k, k_, 48U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+          uint32_t input_len0 = 49U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 48U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)49U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[48U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-          Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+          input[48U] = 1U;
+          Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+          Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+          memcpy(k, k_0, 48U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     case Spec_Hash_Definitions_SHA2_512:
@@ -1007,93 +981,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)65U + additional_input_len;
+          uint32_t input_len = 65U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 64U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)65U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[64U] = (uint8_t)0U;
-          Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-          Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[64U] = 0U;
+          Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+          Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+          memcpy(k, k_, 64U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+            uint32_t input_len0 = 65U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 64U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)65U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[64U] = (uint8_t)1U;
-            Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-            Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-            memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+            input[64U] = 1U;
+            Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+            Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+            memcpy(k, k_0, 64U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)64U;
+        uint32_t max = n / 64U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(out + i * (uint32_t)64U, v, (uint32_t)64U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+          memcpy(out + i * 64U, v, 64U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)64U < n)
+        if (max * 64U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)64U;
-          Hacl_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(block, v, (n - max * (uint32_t)64U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 64U;
+          Hacl_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+          memcpy(block, v, (n - max * 64U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)65U + additional_input_len;
+        uint32_t input_len = 65U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 64U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[64U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-        Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[64U] = 0U;
+        Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+        Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+        memcpy(k, k_, 64U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+          uint32_t input_len0 = 65U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 64U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)65U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[64U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-          Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+          input[64U] = 1U;
+          Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+          Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+          memcpy(k, k_0, 64U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     default:
@@ -1106,7 +1074,7 @@ Hacl_HMAC_DRBG_generate(
 
 void Hacl_HMAC_DRBG_free(Spec_Hash_Definitions_hash_alg uu___, Hacl_HMAC_DRBG_state s)
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
diff --git a/src/msvc/Hacl_HPKE_Curve51_CP128_SHA256.c b/src/msvc/Hacl_HPKE_Curve51_CP128_SHA256.c
index f05fd2bd..c6b62c91 100644
--- a/src/msvc/Hacl_HPKE_Curve51_CP128_SHA256.c
+++ b/src/msvc/Hacl_HPKE_Curve51_CP128_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP128_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP128_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve51_CP128_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve51_CP128_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve51_CP128_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve51_CP128_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve51_CP128_SHA512.c b/src/msvc/Hacl_HPKE_Curve51_CP128_SHA512.c
index 5e5c7788..94258f90 100644
--- a/src/msvc/Hacl_HPKE_Curve51_CP128_SHA512.c
+++ b/src/msvc/Hacl_HPKE_Curve51_CP128_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP128_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP128_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve51_CP128_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve51_CP128_SHA512_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve51_CP128_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve51_CP128_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP128_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve51_CP256_SHA256.c b/src/msvc/Hacl_HPKE_Curve51_CP256_SHA256.c
index 879d3a76..742685e3 100644
--- a/src/msvc/Hacl_HPKE_Curve51_CP256_SHA256.c
+++ b/src/msvc/Hacl_HPKE_Curve51_CP256_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP256_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP256_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve51_CP256_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve51_CP256_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve51_CP256_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve51_CP256_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve51_CP256_SHA512.c b/src/msvc/Hacl_HPKE_Curve51_CP256_SHA512.c
index 0ecc22be..64346ee1 100644
--- a/src/msvc/Hacl_HPKE_Curve51_CP256_SHA512.c
+++ b/src/msvc/Hacl_HPKE_Curve51_CP256_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP256_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP256_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve51_CP256_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve51_CP256_SHA512_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve51_CP256_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve51_CP256_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP256_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve51_CP32_SHA256.c b/src/msvc/Hacl_HPKE_Curve51_CP32_SHA256.c
index ed3f7eed..6dc1f228 100644
--- a/src/msvc/Hacl_HPKE_Curve51_CP32_SHA256.c
+++ b/src/msvc/Hacl_HPKE_Curve51_CP32_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP32_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP32_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,19 +552,19 @@ Hacl_HPKE_Curve51_CP32_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -630,20 +575,20 @@ Hacl_HPKE_Curve51_CP32_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +606,7 @@ Hacl_HPKE_Curve51_CP32_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +617,42 @@ Hacl_HPKE_Curve51_CP32_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve51_CP32_SHA512.c b/src/msvc/Hacl_HPKE_Curve51_CP32_SHA512.c
index 1c4b30e4..9bc18c9d 100644
--- a/src/msvc/Hacl_HPKE_Curve51_CP32_SHA512.c
+++ b/src/msvc/Hacl_HPKE_Curve51_CP32_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP32_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP32_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,19 +552,19 @@ Hacl_HPKE_Curve51_CP32_SHA512_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -630,20 +575,20 @@ Hacl_HPKE_Curve51_CP32_SHA512_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +606,7 @@ Hacl_HPKE_Curve51_CP32_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +617,42 @@ Hacl_HPKE_Curve51_CP32_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve64_CP128_SHA256.c b/src/msvc/Hacl_HPKE_Curve64_CP128_SHA256.c
index 70b41c45..2ff781ed 100644
--- a/src/msvc/Hacl_HPKE_Curve64_CP128_SHA256.c
+++ b/src/msvc/Hacl_HPKE_Curve64_CP128_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP128_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP128_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve64_CP128_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve64_CP128_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve64_CP128_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve64_CP128_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve64_CP128_SHA512.c b/src/msvc/Hacl_HPKE_Curve64_CP128_SHA512.c
index 6a4f3d01..aef8a42e 100644
--- a/src/msvc/Hacl_HPKE_Curve64_CP128_SHA512.c
+++ b/src/msvc/Hacl_HPKE_Curve64_CP128_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP128_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP128_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve64_CP128_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve64_CP128_SHA512_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve64_CP128_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve64_CP128_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP128_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve64_CP256_SHA256.c b/src/msvc/Hacl_HPKE_Curve64_CP256_SHA256.c
index 725bb6cd..c3414ae3 100644
--- a/src/msvc/Hacl_HPKE_Curve64_CP256_SHA256.c
+++ b/src/msvc/Hacl_HPKE_Curve64_CP256_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP256_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP256_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve64_CP256_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve64_CP256_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve64_CP256_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve64_CP256_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve64_CP256_SHA512.c b/src/msvc/Hacl_HPKE_Curve64_CP256_SHA512.c
index e7be8835..4bea61e0 100644
--- a/src/msvc/Hacl_HPKE_Curve64_CP256_SHA512.c
+++ b/src/msvc/Hacl_HPKE_Curve64_CP256_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP256_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP256_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,19 +553,19 @@ Hacl_HPKE_Curve64_CP256_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -631,20 +576,20 @@ Hacl_HPKE_Curve64_CP256_SHA512_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +607,7 @@ Hacl_HPKE_Curve64_CP256_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +618,42 @@ Hacl_HPKE_Curve64_CP256_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP256_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve64_CP32_SHA256.c b/src/msvc/Hacl_HPKE_Curve64_CP32_SHA256.c
index 92672abe..76d235e8 100644
--- a/src/msvc/Hacl_HPKE_Curve64_CP32_SHA256.c
+++ b/src/msvc/Hacl_HPKE_Curve64_CP32_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP32_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP32_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,19 +552,19 @@ Hacl_HPKE_Curve64_CP32_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -630,20 +575,20 @@ Hacl_HPKE_Curve64_CP32_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +606,7 @@ Hacl_HPKE_Curve64_CP32_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +617,42 @@ Hacl_HPKE_Curve64_CP32_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve64_CP32_SHA512.c b/src/msvc/Hacl_HPKE_Curve64_CP32_SHA512.c
index 5ad7e761..501387e2 100644
--- a/src/msvc/Hacl_HPKE_Curve64_CP32_SHA512.c
+++ b/src/msvc/Hacl_HPKE_Curve64_CP32_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP32_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP32_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,19 +552,19 @@ Hacl_HPKE_Curve64_CP32_SHA512_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -630,20 +575,20 @@ Hacl_HPKE_Curve64_CP32_SHA512_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +606,7 @@ Hacl_HPKE_Curve64_CP32_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +617,42 @@ Hacl_HPKE_Curve64_CP32_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_P256_CP128_SHA256.c b/src/msvc/Hacl_HPKE_P256_CP128_SHA256.c
index 5dad7dcf..50921b8f 100644
--- a/src/msvc/Hacl_HPKE_P256_CP128_SHA256.c
+++ b/src/msvc/Hacl_HPKE_P256_CP128_SHA256.c
@@ -38,267 +38,239 @@ Hacl_HPKE_P256_CP128_SHA256_setupBaseS(
 )
 {
   uint8_t o_shared[32U] = { 0U };
-  uint8_t *o_pkE1 = o_pkE + (uint32_t)1U;
+  uint8_t *o_pkE1 = o_pkE + 1U;
   bool res0 = Hacl_Impl_P256_DH_ecp256dh_i(o_pkE1, skE);
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint32_t res3;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    o_pkE[0U] = (uint8_t)4U;
+    o_pkE[0U] = 4U;
     uint8_t o_dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkR, skE);
-    memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(o_dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res2;
     if (res)
     {
-      res2 = (uint32_t)0U;
+      res2 = 0U;
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
     uint8_t o_kemcontext[130U] = { 0U };
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U;
-      uint8_t *o_pkR = o_pkRm + (uint32_t)1U;
-      memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t));
-      o_pkRm[0U] = (uint8_t)4U;
+      memcpy(o_kemcontext, o_pkE, 65U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 65U;
+      uint8_t *o_pkR = o_pkRm + 1U;
+      memcpy(o_pkR, pkR, 64U * sizeof (uint8_t));
+      o_pkRm[0U] = 4U;
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____0 = suite_id_kem;
-      uu____0[0U] = (uint8_t)0x4bU;
-      uu____0[1U] = (uint8_t)0x45U;
-      uu____0[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-      uu____1[0U] = (uint8_t)0U;
-      uu____1[1U] = (uint8_t)16U;
+      uu____0[0U] = 0x4bU;
+      uu____0[1U] = 0x45U;
+      uu____0[2U] = 0x4dU;
+      uint8_t *uu____1 = suite_id_kem + 3U;
+      uu____1[0U] = 0U;
+      uu____1[1U] = 16U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____2 = tmp1;
-      uu____2[0U] = (uint8_t)0x48U;
-      uu____2[1U] = (uint8_t)0x50U;
-      uu____2[2U] = (uint8_t)0x4bU;
-      uu____2[3U] = (uint8_t)0x45U;
-      uu____2[4U] = (uint8_t)0x2dU;
-      uu____2[5U] = (uint8_t)0x76U;
-      uu____2[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+      uu____2[0U] = 0x48U;
+      uu____2[1U] = 0x50U;
+      uu____2[2U] = 0x4bU;
+      uu____2[3U] = 0x45U;
+      uu____2[4U] = 0x2dU;
+      uu____2[5U] = 0x76U;
+      uu____2[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp1 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)157U;
+      uint32_t len = 157U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____3 = tmp + (uint32_t)2U;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)130U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res3 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____3 = tmp + 2U;
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 130U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res3 = 0U;
     }
     else
     {
-      res3 = (uint32_t)1U;
+      res3 = 1U;
     }
   }
   else
   {
-    res3 = (uint32_t)1U;
+    res3 = 1U;
   }
-  if (res3 == (uint32_t)0U)
+  if (res3 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____4 = suite_id;
-    uu____4[0U] = (uint8_t)0x48U;
-    uu____4[1U] = (uint8_t)0x50U;
-    uu____4[2U] = (uint8_t)0x4bU;
-    uu____4[3U] = (uint8_t)0x45U;
-    uint8_t *uu____5 = suite_id + (uint32_t)4U;
-    uu____5[0U] = (uint8_t)0U;
-    uu____5[1U] = (uint8_t)16U;
-    uint8_t *uu____6 = suite_id + (uint32_t)6U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)1U;
-    uint8_t *uu____7 = suite_id + (uint32_t)8U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
+    uu____4[0U] = 0x48U;
+    uu____4[1U] = 0x50U;
+    uu____4[2U] = 0x4bU;
+    uu____4[3U] = 0x45U;
+    uint8_t *uu____5 = suite_id + 4U;
+    uu____5[0U] = 0U;
+    uu____5[1U] = 16U;
+    uint8_t *uu____6 = suite_id + 6U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 1U;
+    uint8_t *uu____7 = suite_id + 8U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____8 = tmp0;
-    uu____8[0U] = (uint8_t)0x48U;
-    uu____8[1U] = (uint8_t)0x50U;
-    uu____8[2U] = (uint8_t)0x4bU;
-    uu____8[3U] = (uint8_t)0x45U;
-    uu____8[4U] = (uint8_t)0x2dU;
-    uu____8[5U] = (uint8_t)0x76U;
-    uu____8[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____8[0U] = 0x48U;
+    uu____8[1U] = 0x50U;
+    uu____8[2U] = 0x4bU;
+    uu____8[3U] = 0x45U;
+    uu____8[4U] = 0x2dU;
+    uu____8[5U] = 0x76U;
+    uu____8[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp1;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp2;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____11 = tmp3 + (uint32_t)2U;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____11 = tmp3 + 2U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp4 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____12 = tmp4 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____13 = tmp + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____13 = tmp + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res3;
   }
   return res3;
@@ -318,279 +290,252 @@ Hacl_HPKE_P256_CP128_SHA256_setupBaseR(
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    uint8_t *pkE = enc + (uint32_t)1U;
+    uint8_t *pkE = enc + 1U;
     uint8_t dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkE, skR);
-    memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res11;
     if (res)
     {
-      res11 = (uint32_t)0U;
+      res11 = 0U;
     }
     else
     {
-      res11 = (uint32_t)1U;
+      res11 = 1U;
     }
     uint32_t res20;
     uint8_t kemcontext[130U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)65U;
-      uint8_t *pkR1 = pkRm + (uint32_t)1U;
+      uint8_t *pkRm = kemcontext + 65U;
+      uint8_t *pkR1 = pkRm + 1U;
       bool res3 = Hacl_Impl_P256_DH_ecp256dh_i(pkR1, skR);
       uint32_t res2;
       if (res3)
       {
-        res2 = (uint32_t)0U;
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
-      if (res2 == (uint32_t)0U)
+      if (res2 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t));
-        pkRm[0U] = (uint8_t)4U;
+        memcpy(kemcontext, enc, 65U * sizeof (uint8_t));
+        pkRm[0U] = 4U;
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____0 = suite_id_kem;
-        uu____0[0U] = (uint8_t)0x4bU;
-        uu____0[1U] = (uint8_t)0x45U;
-        uu____0[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-        uu____1[0U] = (uint8_t)0U;
-        uu____1[1U] = (uint8_t)16U;
+        uu____0[0U] = 0x4bU;
+        uu____0[1U] = 0x45U;
+        uu____0[2U] = 0x4dU;
+        uint8_t *uu____1 = suite_id_kem + 3U;
+        uu____1[0U] = 0U;
+        uu____1[1U] = 16U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp1, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____2 = tmp1;
-        uu____2[0U] = (uint8_t)0x48U;
-        uu____2[1U] = (uint8_t)0x50U;
-        uu____2[2U] = (uint8_t)0x4bU;
-        uu____2[3U] = (uint8_t)0x45U;
-        uu____2[4U] = (uint8_t)0x2dU;
-        uu____2[5U] = (uint8_t)0x76U;
-        uu____2[6U] = (uint8_t)0x31U;
-        memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+        uu____2[0U] = 0x48U;
+        uu____2[1U] = 0x50U;
+        uu____2[2U] = 0x4bU;
+        uu____2[3U] = 0x45U;
+        uu____2[4U] = 0x2dU;
+        uu____2[5U] = 0x76U;
+        uu____2[6U] = 0x31U;
+        memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp1 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)157U;
+        uint32_t len = 157U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____3 = tmp + (uint32_t)2U;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)130U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res20 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____3 = tmp + 2U;
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 130U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res20 = 0U;
       }
       else
       {
-        res20 = (uint32_t)1U;
+        res20 = 1U;
       }
     }
     else
     {
-      res20 = (uint32_t)1U;
+      res20 = 1U;
     }
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____4 = suite_id;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uint8_t *uu____5 = suite_id + (uint32_t)4U;
-      uu____5[0U] = (uint8_t)0U;
-      uu____5[1U] = (uint8_t)16U;
-      uint8_t *uu____6 = suite_id + (uint32_t)6U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)1U;
-      uint8_t *uu____7 = suite_id + (uint32_t)8U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uint8_t *uu____5 = suite_id + 4U;
+      uu____5[0U] = 0U;
+      uu____5[1U] = 16U;
+      uint8_t *uu____6 = suite_id + 6U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 1U;
+      uint8_t *uu____7 = suite_id + 8U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____8 = tmp1;
-      uu____8[0U] = (uint8_t)0x48U;
-      uu____8[1U] = (uint8_t)0x50U;
-      uu____8[2U] = (uint8_t)0x4bU;
-      uu____8[3U] = (uint8_t)0x45U;
-      uu____8[4U] = (uint8_t)0x2dU;
-      uu____8[5U] = (uint8_t)0x76U;
-      uu____8[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp1, len0);
+      uu____8[0U] = 0x48U;
+      uu____8[1U] = 0x50U;
+      uu____8[2U] = 0x4bU;
+      uu____8[3U] = 0x45U;
+      uu____8[4U] = 0x2dU;
+      uu____8[5U] = 0x76U;
+      uu____8[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp1 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp1, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp2 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp2, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp2;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp2, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp2 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp2, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp3 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp3, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp3;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp3, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp3 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp3 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp3, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp4 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp4, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____11 = tmp4 + (uint32_t)2U;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp4,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____11 = tmp4 + 2U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp4, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp5 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp5, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp5, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp5 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp5 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp5, len4, (uint32_t)32U);
+      store16_be(tmp5, (uint16_t)32U);
+      uint8_t *uu____12 = tmp5 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp5 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp5 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp5 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp5, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____13 = tmp + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____13 = tmp + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -609,7 +554,7 @@ Hacl_HPKE_P256_CP128_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -620,19 +565,19 @@ Hacl_HPKE_P256_CP128_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -643,20 +588,20 @@ Hacl_HPKE_P256_CP128_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -674,7 +619,7 @@ Hacl_HPKE_P256_CP128_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -685,42 +630,42 @@ Hacl_HPKE_P256_CP128_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_P256_CP256_SHA256.c b/src/msvc/Hacl_HPKE_P256_CP256_SHA256.c
index 2e932f40..fdd7f0ff 100644
--- a/src/msvc/Hacl_HPKE_P256_CP256_SHA256.c
+++ b/src/msvc/Hacl_HPKE_P256_CP256_SHA256.c
@@ -38,267 +38,239 @@ Hacl_HPKE_P256_CP256_SHA256_setupBaseS(
 )
 {
   uint8_t o_shared[32U] = { 0U };
-  uint8_t *o_pkE1 = o_pkE + (uint32_t)1U;
+  uint8_t *o_pkE1 = o_pkE + 1U;
   bool res0 = Hacl_Impl_P256_DH_ecp256dh_i(o_pkE1, skE);
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint32_t res3;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    o_pkE[0U] = (uint8_t)4U;
+    o_pkE[0U] = 4U;
     uint8_t o_dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkR, skE);
-    memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(o_dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res2;
     if (res)
     {
-      res2 = (uint32_t)0U;
+      res2 = 0U;
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
     uint8_t o_kemcontext[130U] = { 0U };
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U;
-      uint8_t *o_pkR = o_pkRm + (uint32_t)1U;
-      memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t));
-      o_pkRm[0U] = (uint8_t)4U;
+      memcpy(o_kemcontext, o_pkE, 65U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 65U;
+      uint8_t *o_pkR = o_pkRm + 1U;
+      memcpy(o_pkR, pkR, 64U * sizeof (uint8_t));
+      o_pkRm[0U] = 4U;
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____0 = suite_id_kem;
-      uu____0[0U] = (uint8_t)0x4bU;
-      uu____0[1U] = (uint8_t)0x45U;
-      uu____0[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-      uu____1[0U] = (uint8_t)0U;
-      uu____1[1U] = (uint8_t)16U;
+      uu____0[0U] = 0x4bU;
+      uu____0[1U] = 0x45U;
+      uu____0[2U] = 0x4dU;
+      uint8_t *uu____1 = suite_id_kem + 3U;
+      uu____1[0U] = 0U;
+      uu____1[1U] = 16U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____2 = tmp1;
-      uu____2[0U] = (uint8_t)0x48U;
-      uu____2[1U] = (uint8_t)0x50U;
-      uu____2[2U] = (uint8_t)0x4bU;
-      uu____2[3U] = (uint8_t)0x45U;
-      uu____2[4U] = (uint8_t)0x2dU;
-      uu____2[5U] = (uint8_t)0x76U;
-      uu____2[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+      uu____2[0U] = 0x48U;
+      uu____2[1U] = 0x50U;
+      uu____2[2U] = 0x4bU;
+      uu____2[3U] = 0x45U;
+      uu____2[4U] = 0x2dU;
+      uu____2[5U] = 0x76U;
+      uu____2[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp1 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)157U;
+      uint32_t len = 157U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____3 = tmp + (uint32_t)2U;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)130U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res3 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____3 = tmp + 2U;
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 130U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res3 = 0U;
     }
     else
     {
-      res3 = (uint32_t)1U;
+      res3 = 1U;
     }
   }
   else
   {
-    res3 = (uint32_t)1U;
+    res3 = 1U;
   }
-  if (res3 == (uint32_t)0U)
+  if (res3 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____4 = suite_id;
-    uu____4[0U] = (uint8_t)0x48U;
-    uu____4[1U] = (uint8_t)0x50U;
-    uu____4[2U] = (uint8_t)0x4bU;
-    uu____4[3U] = (uint8_t)0x45U;
-    uint8_t *uu____5 = suite_id + (uint32_t)4U;
-    uu____5[0U] = (uint8_t)0U;
-    uu____5[1U] = (uint8_t)16U;
-    uint8_t *uu____6 = suite_id + (uint32_t)6U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)1U;
-    uint8_t *uu____7 = suite_id + (uint32_t)8U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
+    uu____4[0U] = 0x48U;
+    uu____4[1U] = 0x50U;
+    uu____4[2U] = 0x4bU;
+    uu____4[3U] = 0x45U;
+    uint8_t *uu____5 = suite_id + 4U;
+    uu____5[0U] = 0U;
+    uu____5[1U] = 16U;
+    uint8_t *uu____6 = suite_id + 6U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 1U;
+    uint8_t *uu____7 = suite_id + 8U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____8 = tmp0;
-    uu____8[0U] = (uint8_t)0x48U;
-    uu____8[1U] = (uint8_t)0x50U;
-    uu____8[2U] = (uint8_t)0x4bU;
-    uu____8[3U] = (uint8_t)0x45U;
-    uu____8[4U] = (uint8_t)0x2dU;
-    uu____8[5U] = (uint8_t)0x76U;
-    uu____8[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____8[0U] = 0x48U;
+    uu____8[1U] = 0x50U;
+    uu____8[2U] = 0x4bU;
+    uu____8[3U] = 0x45U;
+    uu____8[4U] = 0x2dU;
+    uu____8[5U] = 0x76U;
+    uu____8[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp1;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp2;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____11 = tmp3 + (uint32_t)2U;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____11 = tmp3 + 2U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp4 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____12 = tmp4 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____13 = tmp + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____13 = tmp + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res3;
   }
   return res3;
@@ -318,279 +290,252 @@ Hacl_HPKE_P256_CP256_SHA256_setupBaseR(
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    uint8_t *pkE = enc + (uint32_t)1U;
+    uint8_t *pkE = enc + 1U;
     uint8_t dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkE, skR);
-    memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res11;
     if (res)
     {
-      res11 = (uint32_t)0U;
+      res11 = 0U;
     }
     else
     {
-      res11 = (uint32_t)1U;
+      res11 = 1U;
     }
     uint32_t res20;
     uint8_t kemcontext[130U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)65U;
-      uint8_t *pkR1 = pkRm + (uint32_t)1U;
+      uint8_t *pkRm = kemcontext + 65U;
+      uint8_t *pkR1 = pkRm + 1U;
       bool res3 = Hacl_Impl_P256_DH_ecp256dh_i(pkR1, skR);
       uint32_t res2;
       if (res3)
       {
-        res2 = (uint32_t)0U;
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
-      if (res2 == (uint32_t)0U)
+      if (res2 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t));
-        pkRm[0U] = (uint8_t)4U;
+        memcpy(kemcontext, enc, 65U * sizeof (uint8_t));
+        pkRm[0U] = 4U;
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____0 = suite_id_kem;
-        uu____0[0U] = (uint8_t)0x4bU;
-        uu____0[1U] = (uint8_t)0x45U;
-        uu____0[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-        uu____1[0U] = (uint8_t)0U;
-        uu____1[1U] = (uint8_t)16U;
+        uu____0[0U] = 0x4bU;
+        uu____0[1U] = 0x45U;
+        uu____0[2U] = 0x4dU;
+        uint8_t *uu____1 = suite_id_kem + 3U;
+        uu____1[0U] = 0U;
+        uu____1[1U] = 16U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp1, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____2 = tmp1;
-        uu____2[0U] = (uint8_t)0x48U;
-        uu____2[1U] = (uint8_t)0x50U;
-        uu____2[2U] = (uint8_t)0x4bU;
-        uu____2[3U] = (uint8_t)0x45U;
-        uu____2[4U] = (uint8_t)0x2dU;
-        uu____2[5U] = (uint8_t)0x76U;
-        uu____2[6U] = (uint8_t)0x31U;
-        memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+        uu____2[0U] = 0x48U;
+        uu____2[1U] = 0x50U;
+        uu____2[2U] = 0x4bU;
+        uu____2[3U] = 0x45U;
+        uu____2[4U] = 0x2dU;
+        uu____2[5U] = 0x76U;
+        uu____2[6U] = 0x31U;
+        memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp1 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)157U;
+        uint32_t len = 157U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____3 = tmp + (uint32_t)2U;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)130U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res20 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____3 = tmp + 2U;
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 130U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res20 = 0U;
       }
       else
       {
-        res20 = (uint32_t)1U;
+        res20 = 1U;
       }
     }
     else
     {
-      res20 = (uint32_t)1U;
+      res20 = 1U;
     }
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____4 = suite_id;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uint8_t *uu____5 = suite_id + (uint32_t)4U;
-      uu____5[0U] = (uint8_t)0U;
-      uu____5[1U] = (uint8_t)16U;
-      uint8_t *uu____6 = suite_id + (uint32_t)6U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)1U;
-      uint8_t *uu____7 = suite_id + (uint32_t)8U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uint8_t *uu____5 = suite_id + 4U;
+      uu____5[0U] = 0U;
+      uu____5[1U] = 16U;
+      uint8_t *uu____6 = suite_id + 6U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 1U;
+      uint8_t *uu____7 = suite_id + 8U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____8 = tmp1;
-      uu____8[0U] = (uint8_t)0x48U;
-      uu____8[1U] = (uint8_t)0x50U;
-      uu____8[2U] = (uint8_t)0x4bU;
-      uu____8[3U] = (uint8_t)0x45U;
-      uu____8[4U] = (uint8_t)0x2dU;
-      uu____8[5U] = (uint8_t)0x76U;
-      uu____8[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp1, len0);
+      uu____8[0U] = 0x48U;
+      uu____8[1U] = 0x50U;
+      uu____8[2U] = 0x4bU;
+      uu____8[3U] = 0x45U;
+      uu____8[4U] = 0x2dU;
+      uu____8[5U] = 0x76U;
+      uu____8[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp1 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp1, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp2 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp2, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp2;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp2, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp2 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp2, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp3 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp3, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp3;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp3, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp3 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp3 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp3, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp4 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp4, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____11 = tmp4 + (uint32_t)2U;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp4,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____11 = tmp4 + 2U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp4, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp5 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp5, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp5, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp5 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp5 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp5, len4, (uint32_t)32U);
+      store16_be(tmp5, (uint16_t)32U);
+      uint8_t *uu____12 = tmp5 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp5 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp5 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp5 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp5, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____13 = tmp + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____13 = tmp + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -609,7 +554,7 @@ Hacl_HPKE_P256_CP256_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -620,19 +565,19 @@ Hacl_HPKE_P256_CP256_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -643,20 +588,20 @@ Hacl_HPKE_P256_CP256_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -674,7 +619,7 @@ Hacl_HPKE_P256_CP256_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -685,42 +630,42 @@ Hacl_HPKE_P256_CP256_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_P256_CP32_SHA256.c b/src/msvc/Hacl_HPKE_P256_CP32_SHA256.c
index 34dc3403..c8cbe7e5 100644
--- a/src/msvc/Hacl_HPKE_P256_CP32_SHA256.c
+++ b/src/msvc/Hacl_HPKE_P256_CP32_SHA256.c
@@ -38,267 +38,239 @@ Hacl_HPKE_P256_CP32_SHA256_setupBaseS(
 )
 {
   uint8_t o_shared[32U] = { 0U };
-  uint8_t *o_pkE1 = o_pkE + (uint32_t)1U;
+  uint8_t *o_pkE1 = o_pkE + 1U;
   bool res0 = Hacl_Impl_P256_DH_ecp256dh_i(o_pkE1, skE);
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint32_t res3;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    o_pkE[0U] = (uint8_t)4U;
+    o_pkE[0U] = 4U;
     uint8_t o_dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkR, skE);
-    memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(o_dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res2;
     if (res)
     {
-      res2 = (uint32_t)0U;
+      res2 = 0U;
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
     uint8_t o_kemcontext[130U] = { 0U };
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U;
-      uint8_t *o_pkR = o_pkRm + (uint32_t)1U;
-      memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t));
-      o_pkRm[0U] = (uint8_t)4U;
+      memcpy(o_kemcontext, o_pkE, 65U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 65U;
+      uint8_t *o_pkR = o_pkRm + 1U;
+      memcpy(o_pkR, pkR, 64U * sizeof (uint8_t));
+      o_pkRm[0U] = 4U;
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____0 = suite_id_kem;
-      uu____0[0U] = (uint8_t)0x4bU;
-      uu____0[1U] = (uint8_t)0x45U;
-      uu____0[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-      uu____1[0U] = (uint8_t)0U;
-      uu____1[1U] = (uint8_t)16U;
+      uu____0[0U] = 0x4bU;
+      uu____0[1U] = 0x45U;
+      uu____0[2U] = 0x4dU;
+      uint8_t *uu____1 = suite_id_kem + 3U;
+      uu____1[0U] = 0U;
+      uu____1[1U] = 16U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____2 = tmp1;
-      uu____2[0U] = (uint8_t)0x48U;
-      uu____2[1U] = (uint8_t)0x50U;
-      uu____2[2U] = (uint8_t)0x4bU;
-      uu____2[3U] = (uint8_t)0x45U;
-      uu____2[4U] = (uint8_t)0x2dU;
-      uu____2[5U] = (uint8_t)0x76U;
-      uu____2[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+      uu____2[0U] = 0x48U;
+      uu____2[1U] = 0x50U;
+      uu____2[2U] = 0x4bU;
+      uu____2[3U] = 0x45U;
+      uu____2[4U] = 0x2dU;
+      uu____2[5U] = 0x76U;
+      uu____2[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp1 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)157U;
+      uint32_t len = 157U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____3 = tmp + (uint32_t)2U;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)130U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res3 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____3 = tmp + 2U;
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 130U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res3 = 0U;
     }
     else
     {
-      res3 = (uint32_t)1U;
+      res3 = 1U;
     }
   }
   else
   {
-    res3 = (uint32_t)1U;
+    res3 = 1U;
   }
-  if (res3 == (uint32_t)0U)
+  if (res3 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____4 = suite_id;
-    uu____4[0U] = (uint8_t)0x48U;
-    uu____4[1U] = (uint8_t)0x50U;
-    uu____4[2U] = (uint8_t)0x4bU;
-    uu____4[3U] = (uint8_t)0x45U;
-    uint8_t *uu____5 = suite_id + (uint32_t)4U;
-    uu____5[0U] = (uint8_t)0U;
-    uu____5[1U] = (uint8_t)16U;
-    uint8_t *uu____6 = suite_id + (uint32_t)6U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)1U;
-    uint8_t *uu____7 = suite_id + (uint32_t)8U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
+    uu____4[0U] = 0x48U;
+    uu____4[1U] = 0x50U;
+    uu____4[2U] = 0x4bU;
+    uu____4[3U] = 0x45U;
+    uint8_t *uu____5 = suite_id + 4U;
+    uu____5[0U] = 0U;
+    uu____5[1U] = 16U;
+    uint8_t *uu____6 = suite_id + 6U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 1U;
+    uint8_t *uu____7 = suite_id + 8U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____8 = tmp0;
-    uu____8[0U] = (uint8_t)0x48U;
-    uu____8[1U] = (uint8_t)0x50U;
-    uu____8[2U] = (uint8_t)0x4bU;
-    uu____8[3U] = (uint8_t)0x45U;
-    uu____8[4U] = (uint8_t)0x2dU;
-    uu____8[5U] = (uint8_t)0x76U;
-    uu____8[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____8[0U] = 0x48U;
+    uu____8[1U] = 0x50U;
+    uu____8[2U] = 0x4bU;
+    uu____8[3U] = 0x45U;
+    uu____8[4U] = 0x2dU;
+    uu____8[5U] = 0x76U;
+    uu____8[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp1;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp2;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____11 = tmp3 + (uint32_t)2U;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____11 = tmp3 + 2U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp4 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____12 = tmp4 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____13 = tmp + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____13 = tmp + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res3;
   }
   return res3;
@@ -318,279 +290,252 @@ Hacl_HPKE_P256_CP32_SHA256_setupBaseR(
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    uint8_t *pkE = enc + (uint32_t)1U;
+    uint8_t *pkE = enc + 1U;
     uint8_t dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkE, skR);
-    memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res11;
     if (res)
     {
-      res11 = (uint32_t)0U;
+      res11 = 0U;
     }
     else
     {
-      res11 = (uint32_t)1U;
+      res11 = 1U;
     }
     uint32_t res20;
     uint8_t kemcontext[130U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)65U;
-      uint8_t *pkR1 = pkRm + (uint32_t)1U;
+      uint8_t *pkRm = kemcontext + 65U;
+      uint8_t *pkR1 = pkRm + 1U;
       bool res3 = Hacl_Impl_P256_DH_ecp256dh_i(pkR1, skR);
       uint32_t res2;
       if (res3)
       {
-        res2 = (uint32_t)0U;
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
-      if (res2 == (uint32_t)0U)
+      if (res2 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t));
-        pkRm[0U] = (uint8_t)4U;
+        memcpy(kemcontext, enc, 65U * sizeof (uint8_t));
+        pkRm[0U] = 4U;
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____0 = suite_id_kem;
-        uu____0[0U] = (uint8_t)0x4bU;
-        uu____0[1U] = (uint8_t)0x45U;
-        uu____0[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-        uu____1[0U] = (uint8_t)0U;
-        uu____1[1U] = (uint8_t)16U;
+        uu____0[0U] = 0x4bU;
+        uu____0[1U] = 0x45U;
+        uu____0[2U] = 0x4dU;
+        uint8_t *uu____1 = suite_id_kem + 3U;
+        uu____1[0U] = 0U;
+        uu____1[1U] = 16U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp1, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____2 = tmp1;
-        uu____2[0U] = (uint8_t)0x48U;
-        uu____2[1U] = (uint8_t)0x50U;
-        uu____2[2U] = (uint8_t)0x4bU;
-        uu____2[3U] = (uint8_t)0x45U;
-        uu____2[4U] = (uint8_t)0x2dU;
-        uu____2[5U] = (uint8_t)0x76U;
-        uu____2[6U] = (uint8_t)0x31U;
-        memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+        uu____2[0U] = 0x48U;
+        uu____2[1U] = 0x50U;
+        uu____2[2U] = 0x4bU;
+        uu____2[3U] = 0x45U;
+        uu____2[4U] = 0x2dU;
+        uu____2[5U] = 0x76U;
+        uu____2[6U] = 0x31U;
+        memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp1 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)157U;
+        uint32_t len = 157U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____3 = tmp + (uint32_t)2U;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)130U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res20 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____3 = tmp + 2U;
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 130U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res20 = 0U;
       }
       else
       {
-        res20 = (uint32_t)1U;
+        res20 = 1U;
       }
     }
     else
     {
-      res20 = (uint32_t)1U;
+      res20 = 1U;
     }
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____4 = suite_id;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uint8_t *uu____5 = suite_id + (uint32_t)4U;
-      uu____5[0U] = (uint8_t)0U;
-      uu____5[1U] = (uint8_t)16U;
-      uint8_t *uu____6 = suite_id + (uint32_t)6U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)1U;
-      uint8_t *uu____7 = suite_id + (uint32_t)8U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uint8_t *uu____5 = suite_id + 4U;
+      uu____5[0U] = 0U;
+      uu____5[1U] = 16U;
+      uint8_t *uu____6 = suite_id + 6U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 1U;
+      uint8_t *uu____7 = suite_id + 8U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____8 = tmp1;
-      uu____8[0U] = (uint8_t)0x48U;
-      uu____8[1U] = (uint8_t)0x50U;
-      uu____8[2U] = (uint8_t)0x4bU;
-      uu____8[3U] = (uint8_t)0x45U;
-      uu____8[4U] = (uint8_t)0x2dU;
-      uu____8[5U] = (uint8_t)0x76U;
-      uu____8[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp1, len0);
+      uu____8[0U] = 0x48U;
+      uu____8[1U] = 0x50U;
+      uu____8[2U] = 0x4bU;
+      uu____8[3U] = 0x45U;
+      uu____8[4U] = 0x2dU;
+      uu____8[5U] = 0x76U;
+      uu____8[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp1 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp1, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp2 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp2, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp2;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp2, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp2 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp2, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp3 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp3, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp3;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp3, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp3 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp3 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp3, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp4 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp4, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____11 = tmp4 + (uint32_t)2U;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp4,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____11 = tmp4 + 2U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp4, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp5 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp5, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp5, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp5 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp5 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp5, len4, (uint32_t)32U);
+      store16_be(tmp5, (uint16_t)32U);
+      uint8_t *uu____12 = tmp5 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp5 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp5 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp5 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp5, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____13 = tmp + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____13 = tmp + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -609,7 +554,7 @@ Hacl_HPKE_P256_CP32_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -620,19 +565,19 @@ Hacl_HPKE_P256_CP32_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
       nonce,
       aadlen,
@@ -643,20 +588,20 @@ Hacl_HPKE_P256_CP32_SHA256_sealBase(
       o_ct + plainlen);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -674,7 +619,7 @@ Hacl_HPKE_P256_CP32_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -685,42 +630,42 @@ Hacl_HPKE_P256_CP32_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
     uint32_t
     res1 =
       Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
         nonce,
         aadlen,
         aad,
-        ctlen - (uint32_t)16U,
+        ctlen - 16U,
         o_pt,
         ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        ct + ctlen - 16U);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_Hash_Base.c b/src/msvc/Hacl_Hash_Base.c
index 40796f14..02d893e3 100644
--- a/src/msvc/Hacl_Hash_Base.c
+++ b/src/msvc/Hacl_Hash_Base.c
@@ -31,27 +31,27 @@ uint32_t Hacl_Hash_Definitions_word_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     default:
       {
@@ -67,59 +67,59 @@ uint32_t Hacl_Hash_Definitions_block_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)144U;
+        return 144U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)104U;
+        return 104U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)72U;
+        return 72U;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        return (uint32_t)168U;
+        return 168U;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     default:
       {
@@ -135,27 +135,27 @@ uint32_t Hacl_Hash_Definitions_hash_word_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)5U;
+        return 5U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)7U;
+        return 7U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)6U;
+        return 6U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     default:
       {
@@ -171,51 +171,51 @@ uint32_t Hacl_Hash_Definitions_hash_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)20U;
+        return 20U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     default:
       {
diff --git a/src/msvc/Hacl_Hash_Blake2.c b/src/msvc/Hacl_Hash_Blake2.c
index aecc6165..44c2a29f 100644
--- a/src/msvc/Hacl_Hash_Blake2.c
+++ b/src/msvc/Hacl_Hash_Blake2.c
@@ -39,11 +39,11 @@ blake2b_update_block(
 {
   uint64_t m_w[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint64_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)8U;
+    uint8_t *bj = d + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -52,52 +52,52 @@ blake2b_update_block(
   uint64_t wv_14;
   if (flag)
   {
-    wv_14 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    wv_14 = 0xFFFFFFFFFFFFFFFFULL;
   }
   else
   {
-    wv_14 = (uint64_t)0U;
+    wv_14 = 0ULL;
   }
-  uint64_t wv_15 = (uint64_t)0U;
+  uint64_t wv_15 = 0ULL;
   mask[0U] = FStar_UInt128_uint128_to_uint64(totlen);
-  mask[1U] = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, (uint32_t)64U));
+  mask[1U] = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, 64U));
   mask[2U] = wv_14;
   mask[3U] = wv_15;
-  memcpy(wv, hash, (uint32_t)16U * sizeof (uint64_t));
-  uint64_t *wv3 = wv + (uint32_t)12U;
+  memcpy(wv, hash, 16U * sizeof (uint64_t));
+  uint64_t *wv3 = wv + 12U;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = wv3;
     uint64_t x = wv3[i] ^ mask[i];
     os[i] = x;);
   KRML_MAYBE_FOR12(i0,
-    (uint32_t)0U,
-    (uint32_t)12U,
-    (uint32_t)1U,
-    uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U;
+    0U,
+    12U,
+    1U,
+    uint32_t start_idx = i0 % 10U * 16U;
     uint64_t m_st[16U] = { 0U };
     uint64_t *r0 = m_st;
-    uint64_t *r1 = m_st + (uint32_t)4U;
-    uint64_t *r20 = m_st + (uint32_t)8U;
-    uint64_t *r30 = m_st + (uint32_t)12U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
+    uint64_t *r1 = m_st + 4U;
+    uint64_t *r20 = m_st + 8U;
+    uint64_t *r30 = m_st + 12U;
+    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 15U];
     uint64_t uu____0 = m_w[s2];
     uint64_t uu____1 = m_w[s4];
     uint64_t uu____2 = m_w[s6];
@@ -127,138 +127,138 @@ blake2b_update_block(
     r30[2U] = uu____10;
     r30[3U] = uu____11;
     uint64_t *x = m_st;
-    uint64_t *y = m_st + (uint32_t)4U;
-    uint64_t *z = m_st + (uint32_t)8U;
-    uint64_t *w = m_st + (uint32_t)12U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    uint64_t *wv_a0 = wv + a * (uint32_t)4U;
-    uint64_t *wv_b0 = wv + b0 * (uint32_t)4U;
+    uint64_t *y = m_st + 4U;
+    uint64_t *z = m_st + 8U;
+    uint64_t *w = m_st + 12U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    uint64_t *wv_a0 = wv + a * 4U;
+    uint64_t *wv_b0 = wv + b0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a0;
       uint64_t x1 = wv_a0[i] + wv_b0[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a0;
       uint64_t x1 = wv_a0[i] + x[i];
       os[i] = x1;);
-    uint64_t *wv_a1 = wv + d10 * (uint32_t)4U;
-    uint64_t *wv_b1 = wv + a * (uint32_t)4U;
+    uint64_t *wv_a1 = wv + d10 * 4U;
+    uint64_t *wv_b1 = wv + a * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a1;
       uint64_t x1 = wv_a1[i] ^ wv_b1[i];
       os[i] = x1;);
     uint64_t *r10 = wv_a1;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r10;
       uint64_t x1 = r10[i];
-      uint64_t x10 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U;
+      uint64_t x10 = x1 >> 32U | x1 << 32U;
       os[i] = x10;);
-    uint64_t *wv_a2 = wv + c0 * (uint32_t)4U;
-    uint64_t *wv_b2 = wv + d10 * (uint32_t)4U;
+    uint64_t *wv_a2 = wv + c0 * 4U;
+    uint64_t *wv_b2 = wv + d10 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a2;
       uint64_t x1 = wv_a2[i] + wv_b2[i];
       os[i] = x1;);
-    uint64_t *wv_a3 = wv + b0 * (uint32_t)4U;
-    uint64_t *wv_b3 = wv + c0 * (uint32_t)4U;
+    uint64_t *wv_a3 = wv + b0 * 4U;
+    uint64_t *wv_b3 = wv + c0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a3;
       uint64_t x1 = wv_a3[i] ^ wv_b3[i];
       os[i] = x1;);
     uint64_t *r12 = wv_a3;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r12;
       uint64_t x1 = r12[i];
-      uint64_t x10 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U;
+      uint64_t x10 = x1 >> 24U | x1 << 40U;
       os[i] = x10;);
-    uint64_t *wv_a4 = wv + a * (uint32_t)4U;
-    uint64_t *wv_b4 = wv + b0 * (uint32_t)4U;
+    uint64_t *wv_a4 = wv + a * 4U;
+    uint64_t *wv_b4 = wv + b0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a4;
       uint64_t x1 = wv_a4[i] + wv_b4[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a4;
       uint64_t x1 = wv_a4[i] + y[i];
       os[i] = x1;);
-    uint64_t *wv_a5 = wv + d10 * (uint32_t)4U;
-    uint64_t *wv_b5 = wv + a * (uint32_t)4U;
+    uint64_t *wv_a5 = wv + d10 * 4U;
+    uint64_t *wv_b5 = wv + a * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a5;
       uint64_t x1 = wv_a5[i] ^ wv_b5[i];
       os[i] = x1;);
     uint64_t *r13 = wv_a5;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r13;
       uint64_t x1 = r13[i];
-      uint64_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U;
+      uint64_t x10 = x1 >> 16U | x1 << 48U;
       os[i] = x10;);
-    uint64_t *wv_a6 = wv + c0 * (uint32_t)4U;
-    uint64_t *wv_b6 = wv + d10 * (uint32_t)4U;
+    uint64_t *wv_a6 = wv + c0 * 4U;
+    uint64_t *wv_b6 = wv + d10 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a6;
       uint64_t x1 = wv_a6[i] + wv_b6[i];
       os[i] = x1;);
-    uint64_t *wv_a7 = wv + b0 * (uint32_t)4U;
-    uint64_t *wv_b7 = wv + c0 * (uint32_t)4U;
+    uint64_t *wv_a7 = wv + b0 * 4U;
+    uint64_t *wv_b7 = wv + c0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a7;
       uint64_t x1 = wv_a7[i] ^ wv_b7[i];
       os[i] = x1;);
     uint64_t *r14 = wv_a7;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r14;
       uint64_t x1 = r14[i];
-      uint64_t x10 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U;
+      uint64_t x10 = x1 >> 63U | x1 << 1U;
       os[i] = x10;);
-    uint64_t *r15 = wv + (uint32_t)4U;
-    uint64_t *r21 = wv + (uint32_t)8U;
-    uint64_t *r31 = wv + (uint32_t)12U;
+    uint64_t *r15 = wv + 4U;
+    uint64_t *r21 = wv + 8U;
+    uint64_t *r31 = wv + 12U;
     uint64_t *r110 = r15;
     uint64_t x00 = r110[1U];
     uint64_t x10 = r110[2U];
@@ -286,135 +286,135 @@ blake2b_update_block(
     r112[1U] = x12;
     r112[2U] = x22;
     r112[3U] = x32;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    uint64_t *wv_a = wv + a0 * (uint32_t)4U;
-    uint64_t *wv_b8 = wv + b * (uint32_t)4U;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    uint64_t *wv_a = wv + a0 * 4U;
+    uint64_t *wv_b8 = wv + b * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a;
       uint64_t x1 = wv_a[i] + wv_b8[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a;
       uint64_t x1 = wv_a[i] + z[i];
       os[i] = x1;);
-    uint64_t *wv_a8 = wv + d1 * (uint32_t)4U;
-    uint64_t *wv_b9 = wv + a0 * (uint32_t)4U;
+    uint64_t *wv_a8 = wv + d1 * 4U;
+    uint64_t *wv_b9 = wv + a0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a8;
       uint64_t x1 = wv_a8[i] ^ wv_b9[i];
       os[i] = x1;);
     uint64_t *r16 = wv_a8;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r16;
       uint64_t x1 = r16[i];
-      uint64_t x13 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U;
+      uint64_t x13 = x1 >> 32U | x1 << 32U;
       os[i] = x13;);
-    uint64_t *wv_a9 = wv + c * (uint32_t)4U;
-    uint64_t *wv_b10 = wv + d1 * (uint32_t)4U;
+    uint64_t *wv_a9 = wv + c * 4U;
+    uint64_t *wv_b10 = wv + d1 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a9;
       uint64_t x1 = wv_a9[i] + wv_b10[i];
       os[i] = x1;);
-    uint64_t *wv_a10 = wv + b * (uint32_t)4U;
-    uint64_t *wv_b11 = wv + c * (uint32_t)4U;
+    uint64_t *wv_a10 = wv + b * 4U;
+    uint64_t *wv_b11 = wv + c * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a10;
       uint64_t x1 = wv_a10[i] ^ wv_b11[i];
       os[i] = x1;);
     uint64_t *r17 = wv_a10;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r17;
       uint64_t x1 = r17[i];
-      uint64_t x13 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U;
+      uint64_t x13 = x1 >> 24U | x1 << 40U;
       os[i] = x13;);
-    uint64_t *wv_a11 = wv + a0 * (uint32_t)4U;
-    uint64_t *wv_b12 = wv + b * (uint32_t)4U;
+    uint64_t *wv_a11 = wv + a0 * 4U;
+    uint64_t *wv_b12 = wv + b * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a11;
       uint64_t x1 = wv_a11[i] + wv_b12[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a11;
       uint64_t x1 = wv_a11[i] + w[i];
       os[i] = x1;);
-    uint64_t *wv_a12 = wv + d1 * (uint32_t)4U;
-    uint64_t *wv_b13 = wv + a0 * (uint32_t)4U;
+    uint64_t *wv_a12 = wv + d1 * 4U;
+    uint64_t *wv_b13 = wv + a0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a12;
       uint64_t x1 = wv_a12[i] ^ wv_b13[i];
       os[i] = x1;);
     uint64_t *r18 = wv_a12;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r18;
       uint64_t x1 = r18[i];
-      uint64_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U;
+      uint64_t x13 = x1 >> 16U | x1 << 48U;
       os[i] = x13;);
-    uint64_t *wv_a13 = wv + c * (uint32_t)4U;
-    uint64_t *wv_b14 = wv + d1 * (uint32_t)4U;
+    uint64_t *wv_a13 = wv + c * 4U;
+    uint64_t *wv_b14 = wv + d1 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a13;
       uint64_t x1 = wv_a13[i] + wv_b14[i];
       os[i] = x1;);
-    uint64_t *wv_a14 = wv + b * (uint32_t)4U;
-    uint64_t *wv_b = wv + c * (uint32_t)4U;
+    uint64_t *wv_a14 = wv + b * 4U;
+    uint64_t *wv_b = wv + c * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = wv_a14;
       uint64_t x1 = wv_a14[i] ^ wv_b[i];
       os[i] = x1;);
     uint64_t *r19 = wv_a14;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *os = r19;
       uint64_t x1 = r19[i];
-      uint64_t x13 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U;
+      uint64_t x13 = x1 >> 63U | x1 << 1U;
       os[i] = x13;);
-    uint64_t *r113 = wv + (uint32_t)4U;
-    uint64_t *r2 = wv + (uint32_t)8U;
-    uint64_t *r3 = wv + (uint32_t)12U;
+    uint64_t *r113 = wv + 4U;
+    uint64_t *r2 = wv + 8U;
+    uint64_t *r3 = wv + 12U;
     uint64_t *r11 = r113;
     uint64_t x03 = r11[3U];
     uint64_t x13 = r11[0U];
@@ -443,36 +443,36 @@ blake2b_update_block(
     r115[2U] = x2;
     r115[3U] = x3;);
   uint64_t *s0 = hash;
-  uint64_t *s1 = hash + (uint32_t)4U;
+  uint64_t *s1 = hash + 4U;
   uint64_t *r0 = wv;
-  uint64_t *r1 = wv + (uint32_t)4U;
-  uint64_t *r2 = wv + (uint32_t)8U;
-  uint64_t *r3 = wv + (uint32_t)12U;
+  uint64_t *r1 = wv + 4U;
+  uint64_t *r2 = wv + 8U;
+  uint64_t *r3 = wv + 12U;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = s0;
     uint64_t x = s0[i] ^ r0[i];
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = s0;
     uint64_t x = s0[i] ^ r2[i];
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = s1;
     uint64_t x = s1[i] ^ r1[i];
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = s1;
     uint64_t x = s1[i] ^ r3[i];
     os[i] = x;);
@@ -481,9 +481,9 @@ blake2b_update_block(
 void Hacl_Blake2b_32_blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn)
 {
   uint64_t *r0 = hash;
-  uint64_t *r1 = hash + (uint32_t)4U;
-  uint64_t *r2 = hash + (uint32_t)8U;
-  uint64_t *r3 = hash + (uint32_t)12U;
+  uint64_t *r1 = hash + 4U;
+  uint64_t *r2 = hash + 8U;
+  uint64_t *r3 = hash + 12U;
   uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U];
   uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U];
   uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U];
@@ -500,8 +500,8 @@ void Hacl_Blake2b_32_blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn)
   r3[1U] = iv5;
   r3[2U] = iv6;
   r3[3U] = iv7;
-  uint64_t kk_shift_8 = (uint64_t)kk << (uint32_t)8U;
-  uint64_t iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)nn));
+  uint64_t kk_shift_8 = (uint64_t)kk << 8U;
+  uint64_t iv0_ = iv0 ^ (0x01010000ULL ^ (kk_shift_8 ^ (uint64_t)nn));
   r0[0U] = iv0_;
   r0[1U] = iv1;
   r0[2U] = iv2;
@@ -521,10 +521,10 @@ Hacl_Blake2b_32_blake2b_update_key(
   uint32_t ll
 )
 {
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
   uint8_t b[128U] = { 0U };
   memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
+  if (ll == 0U)
   {
     blake2b_update_block(wv, hash, true, lb, b);
   }
@@ -532,7 +532,7 @@ Hacl_Blake2b_32_blake2b_update_key(
   {
     blake2b_update_block(wv, hash, false, lb, b);
   }
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
 }
 
 void
@@ -545,14 +545,14 @@ Hacl_Blake2b_32_blake2b_update_multi(
   uint32_t nb
 )
 {
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
   {
     FStar_UInt128_uint128
     totlen =
       FStar_UInt128_add_mod(prev,
-        FStar_UInt128_uint64_to_uint128((uint64_t)((i + (uint32_t)1U) * (uint32_t)128U)));
-    uint8_t *b = blocks + i * (uint32_t)128U;
+        FStar_UInt128_uint64_to_uint128((uint64_t)((i + 1U) * 128U)));
+    uint8_t *b = blocks + i * 128U;
     blake2b_update_block(wv, hash, false, totlen, b);
   }
 }
@@ -573,7 +573,7 @@ Hacl_Blake2b_32_blake2b_update_last(
   FStar_UInt128_uint128
   totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
   blake2b_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
 }
 
 static void
@@ -585,13 +585,13 @@ blake2b_update_blocks(
   uint8_t *blocks
 )
 {
-  uint32_t nb0 = len / (uint32_t)128U;
-  uint32_t rem0 = len % (uint32_t)128U;
+  uint32_t nb0 = len / 128U;
+  uint32_t rem0 = len % 128U;
   K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
+  if (rem0 == 0U && nb0 > 0U)
   {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)128U;
+    uint32_t nb_ = nb0 - 1U;
+    uint32_t rem_ = 128U;
     scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
   }
   else
@@ -607,44 +607,32 @@ blake2b_update_blocks(
 static inline void
 blake2b_update(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d)
 {
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
-  if (kk > (uint32_t)0U)
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
+  if (kk > 0U)
   {
     Hacl_Blake2b_32_blake2b_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
+    if (!(ll == 0U))
     {
       blake2b_update_blocks(ll, wv, hash, lb, d);
       return;
     }
     return;
   }
-  blake2b_update_blocks(ll,
-    wv,
-    hash,
-    FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)0U),
-    d);
+  blake2b_update_blocks(ll, wv, hash, FStar_UInt128_uint64_to_uint128((uint64_t)0U), d);
 }
 
 void Hacl_Blake2b_32_blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash)
 {
   uint8_t b[64U] = { 0U };
   uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)32U;
+  uint8_t *second = b + 32U;
   uint64_t *row0 = hash;
-  uint64_t *row1 = hash + (uint32_t)4U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(first + i * (uint32_t)8U, row0[i]););
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(second + i * (uint32_t)8U, row1[i]););
+  uint64_t *row1 = hash + 4U;
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(first + i * 8U, row0[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(second + i * 8U, row1[i]););
   uint8_t *final = b;
   memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
 }
 
 /**
@@ -672,13 +660,13 @@ Hacl_Blake2b_32_blake2b(
   Hacl_Blake2b_32_blake2b_init(b, kk, nn);
   blake2b_update(b1, b, kk, k, ll, d);
   Hacl_Blake2b_32_blake2b_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)16U, uint64_t);
-  Lib_Memzero0_memzero(b, (uint32_t)16U, uint64_t);
+  Lib_Memzero0_memzero(b1, 16U, uint64_t);
+  Lib_Memzero0_memzero(b, 16U, uint64_t);
 }
 
 uint64_t *Hacl_Blake2b_32_blake2b_malloc(void)
 {
-  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
+  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
   return buf;
 }
 
@@ -687,11 +675,11 @@ blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, u
 {
   uint32_t m_w[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)4U;
+    uint8_t *bj = d + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -700,52 +688,52 @@ blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, u
   uint32_t wv_14;
   if (flag)
   {
-    wv_14 = (uint32_t)0xFFFFFFFFU;
+    wv_14 = 0xFFFFFFFFU;
   }
   else
   {
-    wv_14 = (uint32_t)0U;
+    wv_14 = 0U;
   }
-  uint32_t wv_15 = (uint32_t)0U;
+  uint32_t wv_15 = 0U;
   mask[0U] = (uint32_t)totlen;
-  mask[1U] = (uint32_t)(totlen >> (uint32_t)32U);
+  mask[1U] = (uint32_t)(totlen >> 32U);
   mask[2U] = wv_14;
   mask[3U] = wv_15;
-  memcpy(wv, hash, (uint32_t)16U * sizeof (uint32_t));
-  uint32_t *wv3 = wv + (uint32_t)12U;
+  memcpy(wv, hash, 16U * sizeof (uint32_t));
+  uint32_t *wv3 = wv + 12U;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = wv3;
     uint32_t x = wv3[i] ^ mask[i];
     os[i] = x;);
   KRML_MAYBE_FOR10(i0,
-    (uint32_t)0U,
-    (uint32_t)10U,
-    (uint32_t)1U,
-    uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U;
+    0U,
+    10U,
+    1U,
+    uint32_t start_idx = i0 % 10U * 16U;
     uint32_t m_st[16U] = { 0U };
     uint32_t *r0 = m_st;
-    uint32_t *r1 = m_st + (uint32_t)4U;
-    uint32_t *r20 = m_st + (uint32_t)8U;
-    uint32_t *r30 = m_st + (uint32_t)12U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
+    uint32_t *r1 = m_st + 4U;
+    uint32_t *r20 = m_st + 8U;
+    uint32_t *r30 = m_st + 12U;
+    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 15U];
     uint32_t uu____0 = m_w[s2];
     uint32_t uu____1 = m_w[s4];
     uint32_t uu____2 = m_w[s6];
@@ -775,138 +763,138 @@ blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, u
     r30[2U] = uu____10;
     r30[3U] = uu____11;
     uint32_t *x = m_st;
-    uint32_t *y = m_st + (uint32_t)4U;
-    uint32_t *z = m_st + (uint32_t)8U;
-    uint32_t *w = m_st + (uint32_t)12U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    uint32_t *wv_a0 = wv + a * (uint32_t)4U;
-    uint32_t *wv_b0 = wv + b0 * (uint32_t)4U;
+    uint32_t *y = m_st + 4U;
+    uint32_t *z = m_st + 8U;
+    uint32_t *w = m_st + 12U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    uint32_t *wv_a0 = wv + a * 4U;
+    uint32_t *wv_b0 = wv + b0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a0;
       uint32_t x1 = wv_a0[i] + wv_b0[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a0;
       uint32_t x1 = wv_a0[i] + x[i];
       os[i] = x1;);
-    uint32_t *wv_a1 = wv + d10 * (uint32_t)4U;
-    uint32_t *wv_b1 = wv + a * (uint32_t)4U;
+    uint32_t *wv_a1 = wv + d10 * 4U;
+    uint32_t *wv_b1 = wv + a * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a1;
       uint32_t x1 = wv_a1[i] ^ wv_b1[i];
       os[i] = x1;);
     uint32_t *r10 = wv_a1;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r10;
       uint32_t x1 = r10[i];
-      uint32_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U;
+      uint32_t x10 = x1 >> 16U | x1 << 16U;
       os[i] = x10;);
-    uint32_t *wv_a2 = wv + c0 * (uint32_t)4U;
-    uint32_t *wv_b2 = wv + d10 * (uint32_t)4U;
+    uint32_t *wv_a2 = wv + c0 * 4U;
+    uint32_t *wv_b2 = wv + d10 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a2;
       uint32_t x1 = wv_a2[i] + wv_b2[i];
       os[i] = x1;);
-    uint32_t *wv_a3 = wv + b0 * (uint32_t)4U;
-    uint32_t *wv_b3 = wv + c0 * (uint32_t)4U;
+    uint32_t *wv_a3 = wv + b0 * 4U;
+    uint32_t *wv_b3 = wv + c0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a3;
       uint32_t x1 = wv_a3[i] ^ wv_b3[i];
       os[i] = x1;);
     uint32_t *r12 = wv_a3;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r12;
       uint32_t x1 = r12[i];
-      uint32_t x10 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U;
+      uint32_t x10 = x1 >> 12U | x1 << 20U;
       os[i] = x10;);
-    uint32_t *wv_a4 = wv + a * (uint32_t)4U;
-    uint32_t *wv_b4 = wv + b0 * (uint32_t)4U;
+    uint32_t *wv_a4 = wv + a * 4U;
+    uint32_t *wv_b4 = wv + b0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a4;
       uint32_t x1 = wv_a4[i] + wv_b4[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a4;
       uint32_t x1 = wv_a4[i] + y[i];
       os[i] = x1;);
-    uint32_t *wv_a5 = wv + d10 * (uint32_t)4U;
-    uint32_t *wv_b5 = wv + a * (uint32_t)4U;
+    uint32_t *wv_a5 = wv + d10 * 4U;
+    uint32_t *wv_b5 = wv + a * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a5;
       uint32_t x1 = wv_a5[i] ^ wv_b5[i];
       os[i] = x1;);
     uint32_t *r13 = wv_a5;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r13;
       uint32_t x1 = r13[i];
-      uint32_t x10 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U;
+      uint32_t x10 = x1 >> 8U | x1 << 24U;
       os[i] = x10;);
-    uint32_t *wv_a6 = wv + c0 * (uint32_t)4U;
-    uint32_t *wv_b6 = wv + d10 * (uint32_t)4U;
+    uint32_t *wv_a6 = wv + c0 * 4U;
+    uint32_t *wv_b6 = wv + d10 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a6;
       uint32_t x1 = wv_a6[i] + wv_b6[i];
       os[i] = x1;);
-    uint32_t *wv_a7 = wv + b0 * (uint32_t)4U;
-    uint32_t *wv_b7 = wv + c0 * (uint32_t)4U;
+    uint32_t *wv_a7 = wv + b0 * 4U;
+    uint32_t *wv_b7 = wv + c0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a7;
       uint32_t x1 = wv_a7[i] ^ wv_b7[i];
       os[i] = x1;);
     uint32_t *r14 = wv_a7;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r14;
       uint32_t x1 = r14[i];
-      uint32_t x10 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U;
+      uint32_t x10 = x1 >> 7U | x1 << 25U;
       os[i] = x10;);
-    uint32_t *r15 = wv + (uint32_t)4U;
-    uint32_t *r21 = wv + (uint32_t)8U;
-    uint32_t *r31 = wv + (uint32_t)12U;
+    uint32_t *r15 = wv + 4U;
+    uint32_t *r21 = wv + 8U;
+    uint32_t *r31 = wv + 12U;
     uint32_t *r110 = r15;
     uint32_t x00 = r110[1U];
     uint32_t x10 = r110[2U];
@@ -934,135 +922,135 @@ blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, u
     r112[1U] = x12;
     r112[2U] = x22;
     r112[3U] = x32;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    uint32_t *wv_a = wv + a0 * (uint32_t)4U;
-    uint32_t *wv_b8 = wv + b * (uint32_t)4U;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    uint32_t *wv_a = wv + a0 * 4U;
+    uint32_t *wv_b8 = wv + b * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a;
       uint32_t x1 = wv_a[i] + wv_b8[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a;
       uint32_t x1 = wv_a[i] + z[i];
       os[i] = x1;);
-    uint32_t *wv_a8 = wv + d1 * (uint32_t)4U;
-    uint32_t *wv_b9 = wv + a0 * (uint32_t)4U;
+    uint32_t *wv_a8 = wv + d1 * 4U;
+    uint32_t *wv_b9 = wv + a0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a8;
       uint32_t x1 = wv_a8[i] ^ wv_b9[i];
       os[i] = x1;);
     uint32_t *r16 = wv_a8;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r16;
       uint32_t x1 = r16[i];
-      uint32_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U;
+      uint32_t x13 = x1 >> 16U | x1 << 16U;
       os[i] = x13;);
-    uint32_t *wv_a9 = wv + c * (uint32_t)4U;
-    uint32_t *wv_b10 = wv + d1 * (uint32_t)4U;
+    uint32_t *wv_a9 = wv + c * 4U;
+    uint32_t *wv_b10 = wv + d1 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a9;
       uint32_t x1 = wv_a9[i] + wv_b10[i];
       os[i] = x1;);
-    uint32_t *wv_a10 = wv + b * (uint32_t)4U;
-    uint32_t *wv_b11 = wv + c * (uint32_t)4U;
+    uint32_t *wv_a10 = wv + b * 4U;
+    uint32_t *wv_b11 = wv + c * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a10;
       uint32_t x1 = wv_a10[i] ^ wv_b11[i];
       os[i] = x1;);
     uint32_t *r17 = wv_a10;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r17;
       uint32_t x1 = r17[i];
-      uint32_t x13 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U;
+      uint32_t x13 = x1 >> 12U | x1 << 20U;
       os[i] = x13;);
-    uint32_t *wv_a11 = wv + a0 * (uint32_t)4U;
-    uint32_t *wv_b12 = wv + b * (uint32_t)4U;
+    uint32_t *wv_a11 = wv + a0 * 4U;
+    uint32_t *wv_b12 = wv + b * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a11;
       uint32_t x1 = wv_a11[i] + wv_b12[i];
       os[i] = x1;);
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a11;
       uint32_t x1 = wv_a11[i] + w[i];
       os[i] = x1;);
-    uint32_t *wv_a12 = wv + d1 * (uint32_t)4U;
-    uint32_t *wv_b13 = wv + a0 * (uint32_t)4U;
+    uint32_t *wv_a12 = wv + d1 * 4U;
+    uint32_t *wv_b13 = wv + a0 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a12;
       uint32_t x1 = wv_a12[i] ^ wv_b13[i];
       os[i] = x1;);
     uint32_t *r18 = wv_a12;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r18;
       uint32_t x1 = r18[i];
-      uint32_t x13 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U;
+      uint32_t x13 = x1 >> 8U | x1 << 24U;
       os[i] = x13;);
-    uint32_t *wv_a13 = wv + c * (uint32_t)4U;
-    uint32_t *wv_b14 = wv + d1 * (uint32_t)4U;
+    uint32_t *wv_a13 = wv + c * 4U;
+    uint32_t *wv_b14 = wv + d1 * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a13;
       uint32_t x1 = wv_a13[i] + wv_b14[i];
       os[i] = x1;);
-    uint32_t *wv_a14 = wv + b * (uint32_t)4U;
-    uint32_t *wv_b = wv + c * (uint32_t)4U;
+    uint32_t *wv_a14 = wv + b * 4U;
+    uint32_t *wv_b = wv + c * 4U;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = wv_a14;
       uint32_t x1 = wv_a14[i] ^ wv_b[i];
       os[i] = x1;);
     uint32_t *r19 = wv_a14;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *os = r19;
       uint32_t x1 = r19[i];
-      uint32_t x13 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U;
+      uint32_t x13 = x1 >> 7U | x1 << 25U;
       os[i] = x13;);
-    uint32_t *r113 = wv + (uint32_t)4U;
-    uint32_t *r2 = wv + (uint32_t)8U;
-    uint32_t *r3 = wv + (uint32_t)12U;
+    uint32_t *r113 = wv + 4U;
+    uint32_t *r2 = wv + 8U;
+    uint32_t *r3 = wv + 12U;
     uint32_t *r11 = r113;
     uint32_t x03 = r11[3U];
     uint32_t x13 = r11[0U];
@@ -1091,36 +1079,36 @@ blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, u
     r115[2U] = x2;
     r115[3U] = x3;);
   uint32_t *s0 = hash;
-  uint32_t *s1 = hash + (uint32_t)4U;
+  uint32_t *s1 = hash + 4U;
   uint32_t *r0 = wv;
-  uint32_t *r1 = wv + (uint32_t)4U;
-  uint32_t *r2 = wv + (uint32_t)8U;
-  uint32_t *r3 = wv + (uint32_t)12U;
+  uint32_t *r1 = wv + 4U;
+  uint32_t *r2 = wv + 8U;
+  uint32_t *r3 = wv + 12U;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = s0;
     uint32_t x = s0[i] ^ r0[i];
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = s0;
     uint32_t x = s0[i] ^ r2[i];
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = s1;
     uint32_t x = s1[i] ^ r1[i];
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = s1;
     uint32_t x = s1[i] ^ r3[i];
     os[i] = x;);
@@ -1129,9 +1117,9 @@ blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, u
 void Hacl_Blake2s_32_blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn)
 {
   uint32_t *r0 = hash;
-  uint32_t *r1 = hash + (uint32_t)4U;
-  uint32_t *r2 = hash + (uint32_t)8U;
-  uint32_t *r3 = hash + (uint32_t)12U;
+  uint32_t *r1 = hash + 4U;
+  uint32_t *r2 = hash + 8U;
+  uint32_t *r3 = hash + 12U;
   uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U];
   uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U];
   uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U];
@@ -1148,8 +1136,8 @@ void Hacl_Blake2s_32_blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn)
   r3[1U] = iv5;
   r3[2U] = iv6;
   r3[3U] = iv7;
-  uint32_t kk_shift_8 = kk << (uint32_t)8U;
-  uint32_t iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ nn));
+  uint32_t kk_shift_8 = kk << 8U;
+  uint32_t iv0_ = iv0 ^ (0x01010000U ^ (kk_shift_8 ^ nn));
   r0[0U] = iv0_;
   r0[1U] = iv1;
   r0[2U] = iv2;
@@ -1169,10 +1157,10 @@ Hacl_Blake2s_32_blake2s_update_key(
   uint32_t ll
 )
 {
-  uint64_t lb = (uint64_t)(uint32_t)64U;
+  uint64_t lb = (uint64_t)64U;
   uint8_t b[64U] = { 0U };
   memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
+  if (ll == 0U)
   {
     blake2s_update_block(wv, hash, true, lb, b);
   }
@@ -1180,7 +1168,7 @@ Hacl_Blake2s_32_blake2s_update_key(
   {
     blake2s_update_block(wv, hash, false, lb, b);
   }
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
 }
 
 void
@@ -1193,11 +1181,11 @@ Hacl_Blake2s_32_blake2s_update_multi(
   uint32_t nb
 )
 {
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U);
-    uint8_t *b = blocks + i * (uint32_t)64U;
+    uint64_t totlen = prev + (uint64_t)((i + 1U) * 64U);
+    uint8_t *b = blocks + i * 64U;
     blake2s_update_block(wv, hash, false, totlen, b);
   }
 }
@@ -1217,7 +1205,7 @@ Hacl_Blake2s_32_blake2s_update_last(
   memcpy(b, last, rem * sizeof (uint8_t));
   uint64_t totlen = prev + (uint64_t)len;
   blake2s_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
 }
 
 static void
@@ -1229,13 +1217,13 @@ blake2s_update_blocks(
   uint8_t *blocks
 )
 {
-  uint32_t nb0 = len / (uint32_t)64U;
-  uint32_t rem0 = len % (uint32_t)64U;
+  uint32_t nb0 = len / 64U;
+  uint32_t rem0 = len % 64U;
   K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
+  if (rem0 == 0U && nb0 > 0U)
   {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)64U;
+    uint32_t nb_ = nb0 - 1U;
+    uint32_t rem_ = 64U;
     scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
   }
   else
@@ -1251,40 +1239,32 @@ blake2s_update_blocks(
 static inline void
 blake2s_update(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d)
 {
-  uint64_t lb = (uint64_t)(uint32_t)64U;
-  if (kk > (uint32_t)0U)
+  uint64_t lb = (uint64_t)64U;
+  if (kk > 0U)
   {
     Hacl_Blake2s_32_blake2s_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
+    if (!(ll == 0U))
     {
       blake2s_update_blocks(ll, wv, hash, lb, d);
       return;
     }
     return;
   }
-  blake2s_update_blocks(ll, wv, hash, (uint64_t)(uint32_t)0U, d);
+  blake2s_update_blocks(ll, wv, hash, (uint64_t)0U, d);
 }
 
 void Hacl_Blake2s_32_blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash)
 {
   uint8_t b[32U] = { 0U };
   uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)16U;
+  uint8_t *second = b + 16U;
   uint32_t *row0 = hash;
-  uint32_t *row1 = hash + (uint32_t)4U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store32_le(first + i * (uint32_t)4U, row0[i]););
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store32_le(second + i * (uint32_t)4U, row1[i]););
+  uint32_t *row1 = hash + 4U;
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(first + i * 4U, row0[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(second + i * 4U, row1[i]););
   uint8_t *final = b;
   memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)32U, uint8_t);
+  Lib_Memzero0_memzero(b, 32U, uint8_t);
 }
 
 /**
@@ -1312,13 +1292,13 @@ Hacl_Blake2s_32_blake2s(
   Hacl_Blake2s_32_blake2s_init(b, kk, nn);
   blake2s_update(b1, b, kk, k, ll, d);
   Hacl_Blake2s_32_blake2s_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)16U, uint32_t);
-  Lib_Memzero0_memzero(b, (uint32_t)16U, uint32_t);
+  Lib_Memzero0_memzero(b1, 16U, uint32_t);
+  Lib_Memzero0_memzero(b, 16U, uint32_t);
 }
 
 uint32_t *Hacl_Blake2s_32_blake2s_malloc(void)
 {
-  uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
+  uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
   return buf;
 }
 
diff --git a/src/msvc/Hacl_Hash_Blake2b_256.c b/src/msvc/Hacl_Hash_Blake2b_256.c
index b37ffc5f..a265226b 100644
--- a/src/msvc/Hacl_Hash_Blake2b_256.c
+++ b/src/msvc/Hacl_Hash_Blake2b_256.c
@@ -40,11 +40,11 @@ blake2b_update_block(
 {
   uint64_t m_w[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint64_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)8U;
+    uint8_t *bj = d + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -53,159 +53,159 @@ blake2b_update_block(
   uint64_t wv_14;
   if (flag)
   {
-    wv_14 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    wv_14 = 0xFFFFFFFFFFFFFFFFULL;
   }
   else
   {
-    wv_14 = (uint64_t)0U;
+    wv_14 = 0ULL;
   }
-  uint64_t wv_15 = (uint64_t)0U;
+  uint64_t wv_15 = 0ULL;
   mask =
     Lib_IntVector_Intrinsics_vec256_load64s(FStar_UInt128_uint128_to_uint64(totlen),
-      FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, (uint32_t)64U)),
+      FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, 64U)),
       wv_14,
       wv_15);
-  memcpy(wv, hash, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Lib_IntVector_Intrinsics_vec256 *wv3 = wv + (uint32_t)3U;
+  memcpy(wv, hash, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Lib_IntVector_Intrinsics_vec256 *wv3 = wv + 3U;
   wv3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv3[0U], mask);
   KRML_MAYBE_FOR12(i,
-    (uint32_t)0U,
-    (uint32_t)12U,
-    (uint32_t)1U,
-    uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U;
+    0U,
+    12U,
+    1U,
+    uint32_t start_idx = i % 10U * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 m_st[4U] KRML_POST_ALIGN(32) = { 0U };
     Lib_IntVector_Intrinsics_vec256 *r0 = m_st;
-    Lib_IntVector_Intrinsics_vec256 *r1 = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *r20 = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *r30 = m_st + (uint32_t)3U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
+    Lib_IntVector_Intrinsics_vec256 *r1 = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec256 *r20 = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec256 *r30 = m_st + 3U;
+    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 15U];
     r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]);
     r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]);
     r20[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]);
     r30[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]);
     Lib_IntVector_Intrinsics_vec256 *x = m_st;
-    Lib_IntVector_Intrinsics_vec256 *y = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *z = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *w = m_st + (uint32_t)3U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + b0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *y = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec256 *z = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec256 *w = m_st + 3U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + b0 * 1U;
     wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], wv_b0[0U]);
     wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], x[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv + a * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv + a * 1U;
     wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a1[0U], wv_b1[0U]);
-    wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a1[0U], (uint32_t)32U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + d10 * (uint32_t)1U;
+    wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a1[0U], 32U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + d10 * 1U;
     wv_a2[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a2[0U], wv_b2[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + c0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + c0 * 1U;
     wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a3[0U], wv_b3[0U]);
-    wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a3[0U], (uint32_t)24U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + b0 * (uint32_t)1U;
+    wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a3[0U], 24U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + b0 * 1U;
     wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], wv_b4[0U]);
     wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], y[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv + a * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv + a * 1U;
     wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a5[0U], wv_b5[0U]);
-    wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a5[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + d10 * (uint32_t)1U;
+    wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a5[0U], 16U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + d10 * 1U;
     wv_a6[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a6[0U], wv_b6[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + c0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + c0 * 1U;
     wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a7[0U], wv_b7[0U]);
-    wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a7[0U], (uint32_t)63U);
-    Lib_IntVector_Intrinsics_vec256 *r10 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *r21 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *r31 = wv + (uint32_t)3U;
+    wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a7[0U], 63U);
+    Lib_IntVector_Intrinsics_vec256 *r10 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec256 *r21 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec256 *r31 = wv + 3U;
     Lib_IntVector_Intrinsics_vec256 v00 = r10[0U];
     Lib_IntVector_Intrinsics_vec256
-    v1 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v00, (uint32_t)1U);
+    v1 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v00, 1U);
     r10[0U] = v1;
     Lib_IntVector_Intrinsics_vec256 v01 = r21[0U];
     Lib_IntVector_Intrinsics_vec256
-    v10 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v01, (uint32_t)2U);
+    v10 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v01, 2U);
     r21[0U] = v10;
     Lib_IntVector_Intrinsics_vec256 v02 = r31[0U];
     Lib_IntVector_Intrinsics_vec256
-    v11 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v02, (uint32_t)3U);
+    v11 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v02, 3U);
     r31[0U] = v11;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec256 *wv_a = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + b * (uint32_t)1U;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + b * 1U;
     wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], wv_b8[0U]);
     wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], z[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv + a0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv + a0 * 1U;
     wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a8[0U], wv_b9[0U]);
-    wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a8[0U], (uint32_t)32U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + d1 * (uint32_t)1U;
+    wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a8[0U], 32U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + d1 * 1U;
     wv_a9[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a9[0U], wv_b10[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + c * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + c * 1U;
     wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a10[0U], wv_b11[0U]);
-    wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a10[0U], (uint32_t)24U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + b * (uint32_t)1U;
+    wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a10[0U], 24U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + b * 1U;
     wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], wv_b12[0U]);
     wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], w[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv + a0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv + a0 * 1U;
     wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a12[0U], wv_b13[0U]);
-    wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a12[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + d1 * (uint32_t)1U;
+    wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a12[0U], 16U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + d1 * 1U;
     wv_a13[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a13[0U], wv_b14[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b = wv + c * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b = wv + c * 1U;
     wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a14[0U], wv_b[0U]);
-    wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a14[0U], (uint32_t)63U);
-    Lib_IntVector_Intrinsics_vec256 *r11 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *r2 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *r3 = wv + (uint32_t)3U;
+    wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a14[0U], 63U);
+    Lib_IntVector_Intrinsics_vec256 *r11 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec256 *r2 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec256 *r3 = wv + 3U;
     Lib_IntVector_Intrinsics_vec256 v0 = r11[0U];
     Lib_IntVector_Intrinsics_vec256
-    v12 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v0, (uint32_t)3U);
+    v12 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v0, 3U);
     r11[0U] = v12;
     Lib_IntVector_Intrinsics_vec256 v03 = r2[0U];
     Lib_IntVector_Intrinsics_vec256
-    v13 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v03, (uint32_t)2U);
+    v13 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v03, 2U);
     r2[0U] = v13;
     Lib_IntVector_Intrinsics_vec256 v04 = r3[0U];
     Lib_IntVector_Intrinsics_vec256
-    v14 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v04, (uint32_t)1U);
+    v14 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v04, 1U);
     r3[0U] = v14;);
   Lib_IntVector_Intrinsics_vec256 *s0 = hash;
-  Lib_IntVector_Intrinsics_vec256 *s1 = hash + (uint32_t)1U;
+  Lib_IntVector_Intrinsics_vec256 *s1 = hash + 1U;
   Lib_IntVector_Intrinsics_vec256 *r0 = wv;
-  Lib_IntVector_Intrinsics_vec256 *r1 = wv + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = wv + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = wv + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec256 *r1 = wv + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = wv + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = wv + 3U;
   s0[0U] = Lib_IntVector_Intrinsics_vec256_xor(s0[0U], r0[0U]);
   s0[0U] = Lib_IntVector_Intrinsics_vec256_xor(s0[0U], r2[0U]);
   s1[0U] = Lib_IntVector_Intrinsics_vec256_xor(s1[0U], r1[0U]);
@@ -216,9 +216,9 @@ void
 Hacl_Blake2b_256_blake2b_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn)
 {
   Lib_IntVector_Intrinsics_vec256 *r0 = hash;
-  Lib_IntVector_Intrinsics_vec256 *r1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = hash + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = hash + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec256 *r1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = hash + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = hash + 3U;
   uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U];
   uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U];
   uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U];
@@ -229,8 +229,8 @@ Hacl_Blake2b_256_blake2b_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk
   uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U];
   r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3);
   r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
-  uint64_t kk_shift_8 = (uint64_t)kk << (uint32_t)8U;
-  uint64_t iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)nn));
+  uint64_t kk_shift_8 = (uint64_t)kk << 8U;
+  uint64_t iv0_ = iv0 ^ (0x01010000ULL ^ (kk_shift_8 ^ (uint64_t)nn));
   r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1, iv2, iv3);
   r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
 }
@@ -244,10 +244,10 @@ Hacl_Blake2b_256_blake2b_update_key(
   uint32_t ll
 )
 {
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
   uint8_t b[128U] = { 0U };
   memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
+  if (ll == 0U)
   {
     blake2b_update_block(wv, hash, true, lb, b);
   }
@@ -255,7 +255,7 @@ Hacl_Blake2b_256_blake2b_update_key(
   {
     blake2b_update_block(wv, hash, false, lb, b);
   }
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
 }
 
 void
@@ -268,14 +268,14 @@ Hacl_Blake2b_256_blake2b_update_multi(
   uint32_t nb
 )
 {
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
   {
     FStar_UInt128_uint128
     totlen =
       FStar_UInt128_add_mod(prev,
-        FStar_UInt128_uint64_to_uint128((uint64_t)((i + (uint32_t)1U) * (uint32_t)128U)));
-    uint8_t *b = blocks + i * (uint32_t)128U;
+        FStar_UInt128_uint64_to_uint128((uint64_t)((i + 1U) * 128U)));
+    uint8_t *b = blocks + i * 128U;
     blake2b_update_block(wv, hash, false, totlen, b);
   }
 }
@@ -296,7 +296,7 @@ Hacl_Blake2b_256_blake2b_update_last(
   FStar_UInt128_uint128
   totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
   blake2b_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
 }
 
 static inline void
@@ -308,13 +308,13 @@ blake2b_update_blocks(
   uint8_t *blocks
 )
 {
-  uint32_t nb0 = len / (uint32_t)128U;
-  uint32_t rem0 = len % (uint32_t)128U;
+  uint32_t nb0 = len / 128U;
+  uint32_t rem0 = len % 128U;
   K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
+  if (rem0 == 0U && nb0 > 0U)
   {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)128U;
+    uint32_t nb_ = nb0 - 1U;
+    uint32_t rem_ = 128U;
     scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
   }
   else
@@ -337,22 +337,18 @@ blake2b_update(
   uint8_t *d
 )
 {
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
-  if (kk > (uint32_t)0U)
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
+  if (kk > 0U)
   {
     Hacl_Blake2b_256_blake2b_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
+    if (!(ll == 0U))
     {
       blake2b_update_blocks(ll, wv, hash, lb, d);
       return;
     }
     return;
   }
-  blake2b_update_blocks(ll,
-    wv,
-    hash,
-    FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)0U),
-    d);
+  blake2b_update_blocks(ll, wv, hash, FStar_UInt128_uint64_to_uint128((uint64_t)0U), d);
 }
 
 void
@@ -364,14 +360,14 @@ Hacl_Blake2b_256_blake2b_finish(
 {
   uint8_t b[64U] = { 0U };
   uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)32U;
+  uint8_t *second = b + 32U;
   Lib_IntVector_Intrinsics_vec256 *row0 = hash;
-  Lib_IntVector_Intrinsics_vec256 *row1 = hash + (uint32_t)1U;
+  Lib_IntVector_Intrinsics_vec256 *row1 = hash + 1U;
   Lib_IntVector_Intrinsics_vec256_store64_le(first, row0[0U]);
   Lib_IntVector_Intrinsics_vec256_store64_le(second, row1[0U]);
   uint8_t *final = b;
   memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
 }
 
 /**
@@ -399,8 +395,8 @@ Hacl_Blake2b_256_blake2b(
   Hacl_Blake2b_256_blake2b_init(b, kk, nn);
   blake2b_update(b1, b, kk, k, ll, d);
   Hacl_Blake2b_256_blake2b_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)4U, Lib_IntVector_Intrinsics_vec256);
-  Lib_Memzero0_memzero(b, (uint32_t)4U, Lib_IntVector_Intrinsics_vec256);
+  Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec256);
+  Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec256);
 }
 
 void
@@ -410,13 +406,13 @@ Hacl_Blake2b_256_load_state256b_from_state32(
 )
 {
   Lib_IntVector_Intrinsics_vec256 *r0 = st;
-  Lib_IntVector_Intrinsics_vec256 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = st + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec256 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = st + 3U;
   uint64_t *b0 = st32;
-  uint64_t *b1 = st32 + (uint32_t)4U;
-  uint64_t *b2 = st32 + (uint32_t)8U;
-  uint64_t *b3 = st32 + (uint32_t)12U;
+  uint64_t *b1 = st32 + 4U;
+  uint64_t *b2 = st32 + 8U;
+  uint64_t *b3 = st32 + 12U;
   r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b0[0U], b0[1U], b0[2U], b0[3U]);
   r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b1[0U], b1[1U], b1[2U], b1[3U]);
   r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b2[0U], b2[1U], b2[2U], b2[3U]);
@@ -430,21 +426,21 @@ Hacl_Blake2b_256_store_state256b_to_state32(
 )
 {
   Lib_IntVector_Intrinsics_vec256 *r0 = st;
-  Lib_IntVector_Intrinsics_vec256 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = st + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec256 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = st + 3U;
   uint64_t *b0 = st32;
-  uint64_t *b1 = st32 + (uint32_t)4U;
-  uint64_t *b2 = st32 + (uint32_t)8U;
-  uint64_t *b3 = st32 + (uint32_t)12U;
+  uint64_t *b1 = st32 + 4U;
+  uint64_t *b2 = st32 + 8U;
+  uint64_t *b3 = st32 + 12U;
   uint8_t b8[32U] = { 0U };
   Lib_IntVector_Intrinsics_vec256_store64_le(b8, r0[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = b0;
-    uint8_t *bj = b8 + i * (uint32_t)8U;
+    uint8_t *bj = b8 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -452,11 +448,11 @@ Hacl_Blake2b_256_store_state256b_to_state32(
   uint8_t b80[32U] = { 0U };
   Lib_IntVector_Intrinsics_vec256_store64_le(b80, r1[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = b1;
-    uint8_t *bj = b80 + i * (uint32_t)8U;
+    uint8_t *bj = b80 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -464,11 +460,11 @@ Hacl_Blake2b_256_store_state256b_to_state32(
   uint8_t b81[32U] = { 0U };
   Lib_IntVector_Intrinsics_vec256_store64_le(b81, r2[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = b2;
-    uint8_t *bj = b81 + i * (uint32_t)8U;
+    uint8_t *bj = b81 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -476,11 +472,11 @@ Hacl_Blake2b_256_store_state256b_to_state32(
   uint8_t b82[32U] = { 0U };
   Lib_IntVector_Intrinsics_vec256_store64_le(b82, r3[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = b3;
-    uint8_t *bj = b82 + i * (uint32_t)8U;
+    uint8_t *bj = b82 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -492,8 +488,8 @@ Lib_IntVector_Intrinsics_vec256 *Hacl_Blake2b_256_blake2b_malloc(void)
   Lib_IntVector_Intrinsics_vec256
   *buf =
     (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U);
-  memset(buf, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
+  memset(buf, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
   return buf;
 }
 
diff --git a/src/msvc/Hacl_Hash_Blake2s_128.c b/src/msvc/Hacl_Hash_Blake2s_128.c
index 86c4f030..0f3dea1f 100644
--- a/src/msvc/Hacl_Hash_Blake2s_128.c
+++ b/src/msvc/Hacl_Hash_Blake2s_128.c
@@ -40,11 +40,11 @@ blake2s_update_block(
 {
   uint32_t m_w[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)4U;
+    uint8_t *bj = d + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -53,159 +53,159 @@ blake2s_update_block(
   uint32_t wv_14;
   if (flag)
   {
-    wv_14 = (uint32_t)0xFFFFFFFFU;
+    wv_14 = 0xFFFFFFFFU;
   }
   else
   {
-    wv_14 = (uint32_t)0U;
+    wv_14 = 0U;
   }
-  uint32_t wv_15 = (uint32_t)0U;
+  uint32_t wv_15 = 0U;
   mask =
     Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)totlen,
-      (uint32_t)(totlen >> (uint32_t)32U),
+      (uint32_t)(totlen >> 32U),
       wv_14,
       wv_15);
-  memcpy(wv, hash, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Lib_IntVector_Intrinsics_vec128 *wv3 = wv + (uint32_t)3U;
+  memcpy(wv, hash, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Lib_IntVector_Intrinsics_vec128 *wv3 = wv + 3U;
   wv3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv3[0U], mask);
   KRML_MAYBE_FOR10(i,
-    (uint32_t)0U,
-    (uint32_t)10U,
-    (uint32_t)1U,
-    uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U;
+    0U,
+    10U,
+    1U,
+    uint32_t start_idx = i % 10U * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 m_st[4U] KRML_POST_ALIGN(16) = { 0U };
     Lib_IntVector_Intrinsics_vec128 *r0 = m_st;
-    Lib_IntVector_Intrinsics_vec128 *r1 = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *r20 = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *r30 = m_st + (uint32_t)3U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
+    Lib_IntVector_Intrinsics_vec128 *r1 = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec128 *r20 = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec128 *r30 = m_st + 3U;
+    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + 15U];
     r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]);
     r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]);
     r20[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]);
     r30[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]);
     Lib_IntVector_Intrinsics_vec128 *x = m_st;
-    Lib_IntVector_Intrinsics_vec128 *y = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *z = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *w = m_st + (uint32_t)3U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + b0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *y = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec128 *z = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec128 *w = m_st + 3U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + b0 * 1U;
     wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], wv_b0[0U]);
     wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], x[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv + a * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv + a * 1U;
     wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a1[0U], wv_b1[0U]);
-    wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a1[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + d10 * (uint32_t)1U;
+    wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a1[0U], 16U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + d10 * 1U;
     wv_a2[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a2[0U], wv_b2[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + c0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + c0 * 1U;
     wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a3[0U], wv_b3[0U]);
-    wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a3[0U], (uint32_t)12U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + b0 * (uint32_t)1U;
+    wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a3[0U], 12U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + b0 * 1U;
     wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], wv_b4[0U]);
     wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], y[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv + a * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv + a * 1U;
     wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a5[0U], wv_b5[0U]);
-    wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a5[0U], (uint32_t)8U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + d10 * (uint32_t)1U;
+    wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a5[0U], 8U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + d10 * 1U;
     wv_a6[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a6[0U], wv_b6[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + c0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + c0 * 1U;
     wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a7[0U], wv_b7[0U]);
-    wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a7[0U], (uint32_t)7U);
-    Lib_IntVector_Intrinsics_vec128 *r10 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *r21 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *r31 = wv + (uint32_t)3U;
+    wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a7[0U], 7U);
+    Lib_IntVector_Intrinsics_vec128 *r10 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec128 *r21 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec128 *r31 = wv + 3U;
     Lib_IntVector_Intrinsics_vec128 v00 = r10[0U];
     Lib_IntVector_Intrinsics_vec128
-    v1 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v00, (uint32_t)1U);
+    v1 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v00, 1U);
     r10[0U] = v1;
     Lib_IntVector_Intrinsics_vec128 v01 = r21[0U];
     Lib_IntVector_Intrinsics_vec128
-    v10 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v01, (uint32_t)2U);
+    v10 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v01, 2U);
     r21[0U] = v10;
     Lib_IntVector_Intrinsics_vec128 v02 = r31[0U];
     Lib_IntVector_Intrinsics_vec128
-    v11 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v02, (uint32_t)3U);
+    v11 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v02, 3U);
     r31[0U] = v11;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec128 *wv_a = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + b * (uint32_t)1U;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + b * 1U;
     wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], wv_b8[0U]);
     wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], z[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv + a0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv + a0 * 1U;
     wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a8[0U], wv_b9[0U]);
-    wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a8[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + d1 * (uint32_t)1U;
+    wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a8[0U], 16U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + d1 * 1U;
     wv_a9[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a9[0U], wv_b10[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + c * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + c * 1U;
     wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a10[0U], wv_b11[0U]);
-    wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a10[0U], (uint32_t)12U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + b * (uint32_t)1U;
+    wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a10[0U], 12U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + b * 1U;
     wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], wv_b12[0U]);
     wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], w[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv + a0 * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv + a0 * 1U;
     wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a12[0U], wv_b13[0U]);
-    wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a12[0U], (uint32_t)8U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + d1 * (uint32_t)1U;
+    wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a12[0U], 8U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + d1 * 1U;
     wv_a13[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a13[0U], wv_b14[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b = wv + c * (uint32_t)1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b = wv + c * 1U;
     wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a14[0U], wv_b[0U]);
-    wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a14[0U], (uint32_t)7U);
-    Lib_IntVector_Intrinsics_vec128 *r11 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *r2 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *r3 = wv + (uint32_t)3U;
+    wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a14[0U], 7U);
+    Lib_IntVector_Intrinsics_vec128 *r11 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec128 *r2 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec128 *r3 = wv + 3U;
     Lib_IntVector_Intrinsics_vec128 v0 = r11[0U];
     Lib_IntVector_Intrinsics_vec128
-    v12 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v0, (uint32_t)3U);
+    v12 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v0, 3U);
     r11[0U] = v12;
     Lib_IntVector_Intrinsics_vec128 v03 = r2[0U];
     Lib_IntVector_Intrinsics_vec128
-    v13 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v03, (uint32_t)2U);
+    v13 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v03, 2U);
     r2[0U] = v13;
     Lib_IntVector_Intrinsics_vec128 v04 = r3[0U];
     Lib_IntVector_Intrinsics_vec128
-    v14 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v04, (uint32_t)1U);
+    v14 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v04, 1U);
     r3[0U] = v14;);
   Lib_IntVector_Intrinsics_vec128 *s0 = hash;
-  Lib_IntVector_Intrinsics_vec128 *s1 = hash + (uint32_t)1U;
+  Lib_IntVector_Intrinsics_vec128 *s1 = hash + 1U;
   Lib_IntVector_Intrinsics_vec128 *r0 = wv;
-  Lib_IntVector_Intrinsics_vec128 *r1 = wv + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = wv + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = wv + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec128 *r1 = wv + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = wv + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = wv + 3U;
   s0[0U] = Lib_IntVector_Intrinsics_vec128_xor(s0[0U], r0[0U]);
   s0[0U] = Lib_IntVector_Intrinsics_vec128_xor(s0[0U], r2[0U]);
   s1[0U] = Lib_IntVector_Intrinsics_vec128_xor(s1[0U], r1[0U]);
@@ -216,9 +216,9 @@ void
 Hacl_Blake2s_128_blake2s_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn)
 {
   Lib_IntVector_Intrinsics_vec128 *r0 = hash;
-  Lib_IntVector_Intrinsics_vec128 *r1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = hash + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = hash + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec128 *r1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = hash + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = hash + 3U;
   uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U];
   uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U];
   uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U];
@@ -229,8 +229,8 @@ Hacl_Blake2s_128_blake2s_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk
   uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U];
   r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3);
   r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
-  uint32_t kk_shift_8 = kk << (uint32_t)8U;
-  uint32_t iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ nn));
+  uint32_t kk_shift_8 = kk << 8U;
+  uint32_t iv0_ = iv0 ^ (0x01010000U ^ (kk_shift_8 ^ nn));
   r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1, iv2, iv3);
   r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
 }
@@ -244,10 +244,10 @@ Hacl_Blake2s_128_blake2s_update_key(
   uint32_t ll
 )
 {
-  uint64_t lb = (uint64_t)(uint32_t)64U;
+  uint64_t lb = (uint64_t)64U;
   uint8_t b[64U] = { 0U };
   memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
+  if (ll == 0U)
   {
     blake2s_update_block(wv, hash, true, lb, b);
   }
@@ -255,7 +255,7 @@ Hacl_Blake2s_128_blake2s_update_key(
   {
     blake2s_update_block(wv, hash, false, lb, b);
   }
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
 }
 
 void
@@ -268,11 +268,11 @@ Hacl_Blake2s_128_blake2s_update_multi(
   uint32_t nb
 )
 {
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U);
-    uint8_t *b = blocks + i * (uint32_t)64U;
+    uint64_t totlen = prev + (uint64_t)((i + 1U) * 64U);
+    uint8_t *b = blocks + i * 64U;
     blake2s_update_block(wv, hash, false, totlen, b);
   }
 }
@@ -292,7 +292,7 @@ Hacl_Blake2s_128_blake2s_update_last(
   memcpy(b, last, rem * sizeof (uint8_t));
   uint64_t totlen = prev + (uint64_t)len;
   blake2s_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
 }
 
 static inline void
@@ -304,13 +304,13 @@ blake2s_update_blocks(
   uint8_t *blocks
 )
 {
-  uint32_t nb0 = len / (uint32_t)64U;
-  uint32_t rem0 = len % (uint32_t)64U;
+  uint32_t nb0 = len / 64U;
+  uint32_t rem0 = len % 64U;
   K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
+  if (rem0 == 0U && nb0 > 0U)
   {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)64U;
+    uint32_t nb_ = nb0 - 1U;
+    uint32_t rem_ = 64U;
     scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
   }
   else
@@ -333,18 +333,18 @@ blake2s_update(
   uint8_t *d
 )
 {
-  uint64_t lb = (uint64_t)(uint32_t)64U;
-  if (kk > (uint32_t)0U)
+  uint64_t lb = (uint64_t)64U;
+  if (kk > 0U)
   {
     Hacl_Blake2s_128_blake2s_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
+    if (!(ll == 0U))
     {
       blake2s_update_blocks(ll, wv, hash, lb, d);
       return;
     }
     return;
   }
-  blake2s_update_blocks(ll, wv, hash, (uint64_t)(uint32_t)0U, d);
+  blake2s_update_blocks(ll, wv, hash, (uint64_t)0U, d);
 }
 
 void
@@ -356,14 +356,14 @@ Hacl_Blake2s_128_blake2s_finish(
 {
   uint8_t b[32U] = { 0U };
   uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)16U;
+  uint8_t *second = b + 16U;
   Lib_IntVector_Intrinsics_vec128 *row0 = hash;
-  Lib_IntVector_Intrinsics_vec128 *row1 = hash + (uint32_t)1U;
+  Lib_IntVector_Intrinsics_vec128 *row1 = hash + 1U;
   Lib_IntVector_Intrinsics_vec128_store32_le(first, row0[0U]);
   Lib_IntVector_Intrinsics_vec128_store32_le(second, row1[0U]);
   uint8_t *final = b;
   memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)32U, uint8_t);
+  Lib_Memzero0_memzero(b, 32U, uint8_t);
 }
 
 /**
@@ -391,8 +391,8 @@ Hacl_Blake2s_128_blake2s(
   Hacl_Blake2s_128_blake2s_init(b, kk, nn);
   blake2s_update(b1, b, kk, k, ll, d);
   Hacl_Blake2s_128_blake2s_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)4U, Lib_IntVector_Intrinsics_vec128);
-  Lib_Memzero0_memzero(b, (uint32_t)4U, Lib_IntVector_Intrinsics_vec128);
+  Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec128);
+  Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128);
 }
 
 void
@@ -402,21 +402,21 @@ Hacl_Blake2s_128_store_state128s_to_state32(
 )
 {
   Lib_IntVector_Intrinsics_vec128 *r0 = st;
-  Lib_IntVector_Intrinsics_vec128 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = st + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec128 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = st + 3U;
   uint32_t *b0 = st32;
-  uint32_t *b1 = st32 + (uint32_t)4U;
-  uint32_t *b2 = st32 + (uint32_t)8U;
-  uint32_t *b3 = st32 + (uint32_t)12U;
+  uint32_t *b1 = st32 + 4U;
+  uint32_t *b2 = st32 + 8U;
+  uint32_t *b3 = st32 + 12U;
   uint8_t b8[16U] = { 0U };
   Lib_IntVector_Intrinsics_vec128_store32_le(b8, r0[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = b0;
-    uint8_t *bj = b8 + i * (uint32_t)4U;
+    uint8_t *bj = b8 + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -424,11 +424,11 @@ Hacl_Blake2s_128_store_state128s_to_state32(
   uint8_t b80[16U] = { 0U };
   Lib_IntVector_Intrinsics_vec128_store32_le(b80, r1[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = b1;
-    uint8_t *bj = b80 + i * (uint32_t)4U;
+    uint8_t *bj = b80 + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -436,11 +436,11 @@ Hacl_Blake2s_128_store_state128s_to_state32(
   uint8_t b81[16U] = { 0U };
   Lib_IntVector_Intrinsics_vec128_store32_le(b81, r2[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = b2;
-    uint8_t *bj = b81 + i * (uint32_t)4U;
+    uint8_t *bj = b81 + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -448,11 +448,11 @@ Hacl_Blake2s_128_store_state128s_to_state32(
   uint8_t b82[16U] = { 0U };
   Lib_IntVector_Intrinsics_vec128_store32_le(b82, r3[0U]);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = b3;
-    uint8_t *bj = b82 + i * (uint32_t)4U;
+    uint8_t *bj = b82 + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -466,13 +466,13 @@ Hacl_Blake2s_128_load_state128s_from_state32(
 )
 {
   Lib_IntVector_Intrinsics_vec128 *r0 = st;
-  Lib_IntVector_Intrinsics_vec128 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = st + (uint32_t)3U;
+  Lib_IntVector_Intrinsics_vec128 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = st + 3U;
   uint32_t *b0 = st32;
-  uint32_t *b1 = st32 + (uint32_t)4U;
-  uint32_t *b2 = st32 + (uint32_t)8U;
-  uint32_t *b3 = st32 + (uint32_t)12U;
+  uint32_t *b1 = st32 + 4U;
+  uint32_t *b2 = st32 + 8U;
+  uint32_t *b3 = st32 + 12U;
   r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b0[0U], b0[1U], b0[2U], b0[3U]);
   r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b1[0U], b1[1U], b1[2U], b1[3U]);
   r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b2[0U], b2[1U], b2[2U], b2[3U]);
@@ -484,8 +484,8 @@ Lib_IntVector_Intrinsics_vec128 *Hacl_Blake2s_128_blake2s_malloc(void)
   Lib_IntVector_Intrinsics_vec128
   *buf =
     (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U);
-  memset(buf, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(buf, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
   return buf;
 }
 
diff --git a/src/msvc/Hacl_Hash_MD5.c b/src/msvc/Hacl_Hash_MD5.c
index 222ac824..8ef87a1e 100644
--- a/src/msvc/Hacl_Hash_MD5.c
+++ b/src/msvc/Hacl_Hash_MD5.c
@@ -25,34 +25,26 @@
 
 #include "internal/Hacl_Hash_MD5.h"
 
-static uint32_t
-_h0[4U] =
-  { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U };
+static uint32_t _h0[4U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U };
 
 static uint32_t
 _t[64U] =
   {
-    (uint32_t)0xd76aa478U, (uint32_t)0xe8c7b756U, (uint32_t)0x242070dbU, (uint32_t)0xc1bdceeeU,
-    (uint32_t)0xf57c0fafU, (uint32_t)0x4787c62aU, (uint32_t)0xa8304613U, (uint32_t)0xfd469501U,
-    (uint32_t)0x698098d8U, (uint32_t)0x8b44f7afU, (uint32_t)0xffff5bb1U, (uint32_t)0x895cd7beU,
-    (uint32_t)0x6b901122U, (uint32_t)0xfd987193U, (uint32_t)0xa679438eU, (uint32_t)0x49b40821U,
-    (uint32_t)0xf61e2562U, (uint32_t)0xc040b340U, (uint32_t)0x265e5a51U, (uint32_t)0xe9b6c7aaU,
-    (uint32_t)0xd62f105dU, (uint32_t)0x02441453U, (uint32_t)0xd8a1e681U, (uint32_t)0xe7d3fbc8U,
-    (uint32_t)0x21e1cde6U, (uint32_t)0xc33707d6U, (uint32_t)0xf4d50d87U, (uint32_t)0x455a14edU,
-    (uint32_t)0xa9e3e905U, (uint32_t)0xfcefa3f8U, (uint32_t)0x676f02d9U, (uint32_t)0x8d2a4c8aU,
-    (uint32_t)0xfffa3942U, (uint32_t)0x8771f681U, (uint32_t)0x6d9d6122U, (uint32_t)0xfde5380cU,
-    (uint32_t)0xa4beea44U, (uint32_t)0x4bdecfa9U, (uint32_t)0xf6bb4b60U, (uint32_t)0xbebfbc70U,
-    (uint32_t)0x289b7ec6U, (uint32_t)0xeaa127faU, (uint32_t)0xd4ef3085U, (uint32_t)0x4881d05U,
-    (uint32_t)0xd9d4d039U, (uint32_t)0xe6db99e5U, (uint32_t)0x1fa27cf8U, (uint32_t)0xc4ac5665U,
-    (uint32_t)0xf4292244U, (uint32_t)0x432aff97U, (uint32_t)0xab9423a7U, (uint32_t)0xfc93a039U,
-    (uint32_t)0x655b59c3U, (uint32_t)0x8f0ccc92U, (uint32_t)0xffeff47dU, (uint32_t)0x85845dd1U,
-    (uint32_t)0x6fa87e4fU, (uint32_t)0xfe2ce6e0U, (uint32_t)0xa3014314U, (uint32_t)0x4e0811a1U,
-    (uint32_t)0xf7537e82U, (uint32_t)0xbd3af235U, (uint32_t)0x2ad7d2bbU, (uint32_t)0xeb86d391U
+    0xd76aa478U, 0xe8c7b756U, 0x242070dbU, 0xc1bdceeeU, 0xf57c0fafU, 0x4787c62aU, 0xa8304613U,
+    0xfd469501U, 0x698098d8U, 0x8b44f7afU, 0xffff5bb1U, 0x895cd7beU, 0x6b901122U, 0xfd987193U,
+    0xa679438eU, 0x49b40821U, 0xf61e2562U, 0xc040b340U, 0x265e5a51U, 0xe9b6c7aaU, 0xd62f105dU,
+    0x02441453U, 0xd8a1e681U, 0xe7d3fbc8U, 0x21e1cde6U, 0xc33707d6U, 0xf4d50d87U, 0x455a14edU,
+    0xa9e3e905U, 0xfcefa3f8U, 0x676f02d9U, 0x8d2a4c8aU, 0xfffa3942U, 0x8771f681U, 0x6d9d6122U,
+    0xfde5380cU, 0xa4beea44U, 0x4bdecfa9U, 0xf6bb4b60U, 0xbebfbc70U, 0x289b7ec6U, 0xeaa127faU,
+    0xd4ef3085U, 0x4881d05U, 0xd9d4d039U, 0xe6db99e5U, 0x1fa27cf8U, 0xc4ac5665U, 0xf4292244U,
+    0x432aff97U, 0xab9423a7U, 0xfc93a039U, 0x655b59c3U, 0x8f0ccc92U, 0xffeff47dU, 0x85845dd1U,
+    0x6fa87e4fU, 0xfe2ce6e0U, 0xa3014314U, 0x4e0811a1U, 0xf7537e82U, 0xbd3af235U, 0x2ad7d2bbU,
+    0xeb86d391U
   };
 
 void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s)
 {
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, s[i] = _h0[i];);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, s[i] = _h0[i];);
 }
 
 static void legacy_update(uint32_t *abcd, uint8_t *x)
@@ -74,14 +66,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb0
     +
       ((va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0)
-      << (uint32_t)7U
-      | (va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0) >> (uint32_t)25U);
+      << 7U
+      | (va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0) >> 25U);
   abcd[0U] = v;
   uint32_t va0 = abcd[3U];
   uint32_t vb1 = abcd[0U];
   uint32_t vc1 = abcd[1U];
   uint32_t vd1 = abcd[2U];
-  uint8_t *b1 = x + (uint32_t)4U;
+  uint8_t *b1 = x + 4U;
   uint32_t u0 = load32_le(b1);
   uint32_t xk0 = u0;
   uint32_t ti1 = _t[1U];
@@ -90,14 +82,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb1
     +
       ((va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1)
-      << (uint32_t)12U
-      | (va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1) >> (uint32_t)20U);
+      << 12U
+      | (va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1) >> 20U);
   abcd[3U] = v0;
   uint32_t va1 = abcd[2U];
   uint32_t vb2 = abcd[3U];
   uint32_t vc2 = abcd[0U];
   uint32_t vd2 = abcd[1U];
-  uint8_t *b2 = x + (uint32_t)8U;
+  uint8_t *b2 = x + 8U;
   uint32_t u1 = load32_le(b2);
   uint32_t xk1 = u1;
   uint32_t ti2 = _t[2U];
@@ -106,14 +98,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb2
     +
       ((va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2)
-      << (uint32_t)17U
-      | (va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2) >> (uint32_t)15U);
+      << 17U
+      | (va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2) >> 15U);
   abcd[2U] = v1;
   uint32_t va2 = abcd[1U];
   uint32_t vb3 = abcd[2U];
   uint32_t vc3 = abcd[3U];
   uint32_t vd3 = abcd[0U];
-  uint8_t *b3 = x + (uint32_t)12U;
+  uint8_t *b3 = x + 12U;
   uint32_t u2 = load32_le(b3);
   uint32_t xk2 = u2;
   uint32_t ti3 = _t[3U];
@@ -122,14 +114,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb3
     +
       ((va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3)
-      << (uint32_t)22U
-      | (va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3) >> (uint32_t)10U);
+      << 22U
+      | (va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3) >> 10U);
   abcd[1U] = v2;
   uint32_t va3 = abcd[0U];
   uint32_t vb4 = abcd[1U];
   uint32_t vc4 = abcd[2U];
   uint32_t vd4 = abcd[3U];
-  uint8_t *b4 = x + (uint32_t)16U;
+  uint8_t *b4 = x + 16U;
   uint32_t u3 = load32_le(b4);
   uint32_t xk3 = u3;
   uint32_t ti4 = _t[4U];
@@ -138,14 +130,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb4
     +
       ((va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4)
-      << (uint32_t)7U
-      | (va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4) >> (uint32_t)25U);
+      << 7U
+      | (va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4) >> 25U);
   abcd[0U] = v3;
   uint32_t va4 = abcd[3U];
   uint32_t vb5 = abcd[0U];
   uint32_t vc5 = abcd[1U];
   uint32_t vd5 = abcd[2U];
-  uint8_t *b5 = x + (uint32_t)20U;
+  uint8_t *b5 = x + 20U;
   uint32_t u4 = load32_le(b5);
   uint32_t xk4 = u4;
   uint32_t ti5 = _t[5U];
@@ -154,14 +146,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb5
     +
       ((va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5)
-      << (uint32_t)12U
-      | (va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5) >> (uint32_t)20U);
+      << 12U
+      | (va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5) >> 20U);
   abcd[3U] = v4;
   uint32_t va5 = abcd[2U];
   uint32_t vb6 = abcd[3U];
   uint32_t vc6 = abcd[0U];
   uint32_t vd6 = abcd[1U];
-  uint8_t *b6 = x + (uint32_t)24U;
+  uint8_t *b6 = x + 24U;
   uint32_t u5 = load32_le(b6);
   uint32_t xk5 = u5;
   uint32_t ti6 = _t[6U];
@@ -170,14 +162,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb6
     +
       ((va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6)
-      << (uint32_t)17U
-      | (va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6) >> (uint32_t)15U);
+      << 17U
+      | (va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6) >> 15U);
   abcd[2U] = v5;
   uint32_t va6 = abcd[1U];
   uint32_t vb7 = abcd[2U];
   uint32_t vc7 = abcd[3U];
   uint32_t vd7 = abcd[0U];
-  uint8_t *b7 = x + (uint32_t)28U;
+  uint8_t *b7 = x + 28U;
   uint32_t u6 = load32_le(b7);
   uint32_t xk6 = u6;
   uint32_t ti7 = _t[7U];
@@ -186,14 +178,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb7
     +
       ((va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7)
-      << (uint32_t)22U
-      | (va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7) >> (uint32_t)10U);
+      << 22U
+      | (va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7) >> 10U);
   abcd[1U] = v6;
   uint32_t va7 = abcd[0U];
   uint32_t vb8 = abcd[1U];
   uint32_t vc8 = abcd[2U];
   uint32_t vd8 = abcd[3U];
-  uint8_t *b8 = x + (uint32_t)32U;
+  uint8_t *b8 = x + 32U;
   uint32_t u7 = load32_le(b8);
   uint32_t xk7 = u7;
   uint32_t ti8 = _t[8U];
@@ -202,14 +194,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb8
     +
       ((va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8)
-      << (uint32_t)7U
-      | (va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8) >> (uint32_t)25U);
+      << 7U
+      | (va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8) >> 25U);
   abcd[0U] = v7;
   uint32_t va8 = abcd[3U];
   uint32_t vb9 = abcd[0U];
   uint32_t vc9 = abcd[1U];
   uint32_t vd9 = abcd[2U];
-  uint8_t *b9 = x + (uint32_t)36U;
+  uint8_t *b9 = x + 36U;
   uint32_t u8 = load32_le(b9);
   uint32_t xk8 = u8;
   uint32_t ti9 = _t[9U];
@@ -218,14 +210,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb9
     +
       ((va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9)
-      << (uint32_t)12U
-      | (va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9) >> (uint32_t)20U);
+      << 12U
+      | (va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9) >> 20U);
   abcd[3U] = v8;
   uint32_t va9 = abcd[2U];
   uint32_t vb10 = abcd[3U];
   uint32_t vc10 = abcd[0U];
   uint32_t vd10 = abcd[1U];
-  uint8_t *b10 = x + (uint32_t)40U;
+  uint8_t *b10 = x + 40U;
   uint32_t u9 = load32_le(b10);
   uint32_t xk9 = u9;
   uint32_t ti10 = _t[10U];
@@ -234,14 +226,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb10
     +
       ((va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10)
-      << (uint32_t)17U
-      | (va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10) >> (uint32_t)15U);
+      << 17U
+      | (va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10) >> 15U);
   abcd[2U] = v9;
   uint32_t va10 = abcd[1U];
   uint32_t vb11 = abcd[2U];
   uint32_t vc11 = abcd[3U];
   uint32_t vd11 = abcd[0U];
-  uint8_t *b11 = x + (uint32_t)44U;
+  uint8_t *b11 = x + 44U;
   uint32_t u10 = load32_le(b11);
   uint32_t xk10 = u10;
   uint32_t ti11 = _t[11U];
@@ -250,14 +242,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb11
     +
       ((va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11)
-      << (uint32_t)22U
-      | (va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11) >> (uint32_t)10U);
+      << 22U
+      | (va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11) >> 10U);
   abcd[1U] = v10;
   uint32_t va11 = abcd[0U];
   uint32_t vb12 = abcd[1U];
   uint32_t vc12 = abcd[2U];
   uint32_t vd12 = abcd[3U];
-  uint8_t *b12 = x + (uint32_t)48U;
+  uint8_t *b12 = x + 48U;
   uint32_t u11 = load32_le(b12);
   uint32_t xk11 = u11;
   uint32_t ti12 = _t[12U];
@@ -266,14 +258,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb12
     +
       ((va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12)
-      << (uint32_t)7U
-      | (va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12) >> (uint32_t)25U);
+      << 7U
+      | (va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12) >> 25U);
   abcd[0U] = v11;
   uint32_t va12 = abcd[3U];
   uint32_t vb13 = abcd[0U];
   uint32_t vc13 = abcd[1U];
   uint32_t vd13 = abcd[2U];
-  uint8_t *b13 = x + (uint32_t)52U;
+  uint8_t *b13 = x + 52U;
   uint32_t u12 = load32_le(b13);
   uint32_t xk12 = u12;
   uint32_t ti13 = _t[13U];
@@ -282,14 +274,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb13
     +
       ((va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13)
-      << (uint32_t)12U
-      | (va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13) >> (uint32_t)20U);
+      << 12U
+      | (va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13) >> 20U);
   abcd[3U] = v12;
   uint32_t va13 = abcd[2U];
   uint32_t vb14 = abcd[3U];
   uint32_t vc14 = abcd[0U];
   uint32_t vd14 = abcd[1U];
-  uint8_t *b14 = x + (uint32_t)56U;
+  uint8_t *b14 = x + 56U;
   uint32_t u13 = load32_le(b14);
   uint32_t xk13 = u13;
   uint32_t ti14 = _t[14U];
@@ -298,14 +290,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb14
     +
       ((va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14)
-      << (uint32_t)17U
-      | (va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14) >> (uint32_t)15U);
+      << 17U
+      | (va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14) >> 15U);
   abcd[2U] = v13;
   uint32_t va14 = abcd[1U];
   uint32_t vb15 = abcd[2U];
   uint32_t vc15 = abcd[3U];
   uint32_t vd15 = abcd[0U];
-  uint8_t *b15 = x + (uint32_t)60U;
+  uint8_t *b15 = x + 60U;
   uint32_t u14 = load32_le(b15);
   uint32_t xk14 = u14;
   uint32_t ti15 = _t[15U];
@@ -314,14 +306,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb15
     +
       ((va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15)
-      << (uint32_t)22U
-      | (va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15) >> (uint32_t)10U);
+      << 22U
+      | (va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15) >> 10U);
   abcd[1U] = v14;
   uint32_t va15 = abcd[0U];
   uint32_t vb16 = abcd[1U];
   uint32_t vc16 = abcd[2U];
   uint32_t vd16 = abcd[3U];
-  uint8_t *b16 = x + (uint32_t)4U;
+  uint8_t *b16 = x + 4U;
   uint32_t u15 = load32_le(b16);
   uint32_t xk15 = u15;
   uint32_t ti16 = _t[16U];
@@ -330,14 +322,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb16
     +
       ((va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16)
-      << (uint32_t)5U
-      | (va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16) >> (uint32_t)27U);
+      << 5U
+      | (va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16) >> 27U);
   abcd[0U] = v15;
   uint32_t va16 = abcd[3U];
   uint32_t vb17 = abcd[0U];
   uint32_t vc17 = abcd[1U];
   uint32_t vd17 = abcd[2U];
-  uint8_t *b17 = x + (uint32_t)24U;
+  uint8_t *b17 = x + 24U;
   uint32_t u16 = load32_le(b17);
   uint32_t xk16 = u16;
   uint32_t ti17 = _t[17U];
@@ -346,14 +338,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb17
     +
       ((va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17)
-      << (uint32_t)9U
-      | (va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17) >> (uint32_t)23U);
+      << 9U
+      | (va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17) >> 23U);
   abcd[3U] = v16;
   uint32_t va17 = abcd[2U];
   uint32_t vb18 = abcd[3U];
   uint32_t vc18 = abcd[0U];
   uint32_t vd18 = abcd[1U];
-  uint8_t *b18 = x + (uint32_t)44U;
+  uint8_t *b18 = x + 44U;
   uint32_t u17 = load32_le(b18);
   uint32_t xk17 = u17;
   uint32_t ti18 = _t[18U];
@@ -362,8 +354,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb18
     +
       ((va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18)
-      << (uint32_t)14U
-      | (va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18) >> (uint32_t)18U);
+      << 14U
+      | (va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18) >> 18U);
   abcd[2U] = v17;
   uint32_t va18 = abcd[1U];
   uint32_t vb19 = abcd[2U];
@@ -378,14 +370,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb19
     +
       ((va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19)
-      << (uint32_t)20U
-      | (va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19) >> (uint32_t)12U);
+      << 20U
+      | (va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19) >> 12U);
   abcd[1U] = v18;
   uint32_t va19 = abcd[0U];
   uint32_t vb20 = abcd[1U];
   uint32_t vc20 = abcd[2U];
   uint32_t vd20 = abcd[3U];
-  uint8_t *b20 = x + (uint32_t)20U;
+  uint8_t *b20 = x + 20U;
   uint32_t u19 = load32_le(b20);
   uint32_t xk19 = u19;
   uint32_t ti20 = _t[20U];
@@ -394,14 +386,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb20
     +
       ((va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20)
-      << (uint32_t)5U
-      | (va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20) >> (uint32_t)27U);
+      << 5U
+      | (va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20) >> 27U);
   abcd[0U] = v19;
   uint32_t va20 = abcd[3U];
   uint32_t vb21 = abcd[0U];
   uint32_t vc21 = abcd[1U];
   uint32_t vd21 = abcd[2U];
-  uint8_t *b21 = x + (uint32_t)40U;
+  uint8_t *b21 = x + 40U;
   uint32_t u20 = load32_le(b21);
   uint32_t xk20 = u20;
   uint32_t ti21 = _t[21U];
@@ -410,14 +402,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb21
     +
       ((va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21)
-      << (uint32_t)9U
-      | (va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21) >> (uint32_t)23U);
+      << 9U
+      | (va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21) >> 23U);
   abcd[3U] = v20;
   uint32_t va21 = abcd[2U];
   uint32_t vb22 = abcd[3U];
   uint32_t vc22 = abcd[0U];
   uint32_t vd22 = abcd[1U];
-  uint8_t *b22 = x + (uint32_t)60U;
+  uint8_t *b22 = x + 60U;
   uint32_t u21 = load32_le(b22);
   uint32_t xk21 = u21;
   uint32_t ti22 = _t[22U];
@@ -426,14 +418,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb22
     +
       ((va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22)
-      << (uint32_t)14U
-      | (va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22) >> (uint32_t)18U);
+      << 14U
+      | (va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22) >> 18U);
   abcd[2U] = v21;
   uint32_t va22 = abcd[1U];
   uint32_t vb23 = abcd[2U];
   uint32_t vc23 = abcd[3U];
   uint32_t vd23 = abcd[0U];
-  uint8_t *b23 = x + (uint32_t)16U;
+  uint8_t *b23 = x + 16U;
   uint32_t u22 = load32_le(b23);
   uint32_t xk22 = u22;
   uint32_t ti23 = _t[23U];
@@ -442,14 +434,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb23
     +
       ((va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23)
-      << (uint32_t)20U
-      | (va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23) >> (uint32_t)12U);
+      << 20U
+      | (va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23) >> 12U);
   abcd[1U] = v22;
   uint32_t va23 = abcd[0U];
   uint32_t vb24 = abcd[1U];
   uint32_t vc24 = abcd[2U];
   uint32_t vd24 = abcd[3U];
-  uint8_t *b24 = x + (uint32_t)36U;
+  uint8_t *b24 = x + 36U;
   uint32_t u23 = load32_le(b24);
   uint32_t xk23 = u23;
   uint32_t ti24 = _t[24U];
@@ -458,14 +450,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb24
     +
       ((va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24)
-      << (uint32_t)5U
-      | (va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24) >> (uint32_t)27U);
+      << 5U
+      | (va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24) >> 27U);
   abcd[0U] = v23;
   uint32_t va24 = abcd[3U];
   uint32_t vb25 = abcd[0U];
   uint32_t vc25 = abcd[1U];
   uint32_t vd25 = abcd[2U];
-  uint8_t *b25 = x + (uint32_t)56U;
+  uint8_t *b25 = x + 56U;
   uint32_t u24 = load32_le(b25);
   uint32_t xk24 = u24;
   uint32_t ti25 = _t[25U];
@@ -474,14 +466,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb25
     +
       ((va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25)
-      << (uint32_t)9U
-      | (va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25) >> (uint32_t)23U);
+      << 9U
+      | (va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25) >> 23U);
   abcd[3U] = v24;
   uint32_t va25 = abcd[2U];
   uint32_t vb26 = abcd[3U];
   uint32_t vc26 = abcd[0U];
   uint32_t vd26 = abcd[1U];
-  uint8_t *b26 = x + (uint32_t)12U;
+  uint8_t *b26 = x + 12U;
   uint32_t u25 = load32_le(b26);
   uint32_t xk25 = u25;
   uint32_t ti26 = _t[26U];
@@ -490,14 +482,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb26
     +
       ((va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26)
-      << (uint32_t)14U
-      | (va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26) >> (uint32_t)18U);
+      << 14U
+      | (va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26) >> 18U);
   abcd[2U] = v25;
   uint32_t va26 = abcd[1U];
   uint32_t vb27 = abcd[2U];
   uint32_t vc27 = abcd[3U];
   uint32_t vd27 = abcd[0U];
-  uint8_t *b27 = x + (uint32_t)32U;
+  uint8_t *b27 = x + 32U;
   uint32_t u26 = load32_le(b27);
   uint32_t xk26 = u26;
   uint32_t ti27 = _t[27U];
@@ -506,14 +498,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb27
     +
       ((va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27)
-      << (uint32_t)20U
-      | (va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27) >> (uint32_t)12U);
+      << 20U
+      | (va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27) >> 12U);
   abcd[1U] = v26;
   uint32_t va27 = abcd[0U];
   uint32_t vb28 = abcd[1U];
   uint32_t vc28 = abcd[2U];
   uint32_t vd28 = abcd[3U];
-  uint8_t *b28 = x + (uint32_t)52U;
+  uint8_t *b28 = x + 52U;
   uint32_t u27 = load32_le(b28);
   uint32_t xk27 = u27;
   uint32_t ti28 = _t[28U];
@@ -522,14 +514,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb28
     +
       ((va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28)
-      << (uint32_t)5U
-      | (va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28) >> (uint32_t)27U);
+      << 5U
+      | (va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28) >> 27U);
   abcd[0U] = v27;
   uint32_t va28 = abcd[3U];
   uint32_t vb29 = abcd[0U];
   uint32_t vc29 = abcd[1U];
   uint32_t vd29 = abcd[2U];
-  uint8_t *b29 = x + (uint32_t)8U;
+  uint8_t *b29 = x + 8U;
   uint32_t u28 = load32_le(b29);
   uint32_t xk28 = u28;
   uint32_t ti29 = _t[29U];
@@ -538,14 +530,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb29
     +
       ((va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29)
-      << (uint32_t)9U
-      | (va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29) >> (uint32_t)23U);
+      << 9U
+      | (va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29) >> 23U);
   abcd[3U] = v28;
   uint32_t va29 = abcd[2U];
   uint32_t vb30 = abcd[3U];
   uint32_t vc30 = abcd[0U];
   uint32_t vd30 = abcd[1U];
-  uint8_t *b30 = x + (uint32_t)28U;
+  uint8_t *b30 = x + 28U;
   uint32_t u29 = load32_le(b30);
   uint32_t xk29 = u29;
   uint32_t ti30 = _t[30U];
@@ -554,14 +546,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb30
     +
       ((va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30)
-      << (uint32_t)14U
-      | (va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30) >> (uint32_t)18U);
+      << 14U
+      | (va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30) >> 18U);
   abcd[2U] = v29;
   uint32_t va30 = abcd[1U];
   uint32_t vb31 = abcd[2U];
   uint32_t vc31 = abcd[3U];
   uint32_t vd31 = abcd[0U];
-  uint8_t *b31 = x + (uint32_t)48U;
+  uint8_t *b31 = x + 48U;
   uint32_t u30 = load32_le(b31);
   uint32_t xk30 = u30;
   uint32_t ti31 = _t[31U];
@@ -570,14 +562,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb31
     +
       ((va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31)
-      << (uint32_t)20U
-      | (va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31) >> (uint32_t)12U);
+      << 20U
+      | (va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31) >> 12U);
   abcd[1U] = v30;
   uint32_t va31 = abcd[0U];
   uint32_t vb32 = abcd[1U];
   uint32_t vc32 = abcd[2U];
   uint32_t vd32 = abcd[3U];
-  uint8_t *b32 = x + (uint32_t)20U;
+  uint8_t *b32 = x + 20U;
   uint32_t u31 = load32_le(b32);
   uint32_t xk31 = u31;
   uint32_t ti32 = _t[32U];
@@ -586,14 +578,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb32
     +
       ((va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32)
-      << (uint32_t)4U
-      | (va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32) >> (uint32_t)28U);
+      << 4U
+      | (va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32) >> 28U);
   abcd[0U] = v31;
   uint32_t va32 = abcd[3U];
   uint32_t vb33 = abcd[0U];
   uint32_t vc33 = abcd[1U];
   uint32_t vd33 = abcd[2U];
-  uint8_t *b33 = x + (uint32_t)32U;
+  uint8_t *b33 = x + 32U;
   uint32_t u32 = load32_le(b33);
   uint32_t xk32 = u32;
   uint32_t ti33 = _t[33U];
@@ -602,14 +594,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb33
     +
       ((va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33)
-      << (uint32_t)11U
-      | (va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33) >> (uint32_t)21U);
+      << 11U
+      | (va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33) >> 21U);
   abcd[3U] = v32;
   uint32_t va33 = abcd[2U];
   uint32_t vb34 = abcd[3U];
   uint32_t vc34 = abcd[0U];
   uint32_t vd34 = abcd[1U];
-  uint8_t *b34 = x + (uint32_t)44U;
+  uint8_t *b34 = x + 44U;
   uint32_t u33 = load32_le(b34);
   uint32_t xk33 = u33;
   uint32_t ti34 = _t[34U];
@@ -618,14 +610,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb34
     +
       ((va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34)
-      << (uint32_t)16U
-      | (va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34) >> (uint32_t)16U);
+      << 16U
+      | (va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34) >> 16U);
   abcd[2U] = v33;
   uint32_t va34 = abcd[1U];
   uint32_t vb35 = abcd[2U];
   uint32_t vc35 = abcd[3U];
   uint32_t vd35 = abcd[0U];
-  uint8_t *b35 = x + (uint32_t)56U;
+  uint8_t *b35 = x + 56U;
   uint32_t u34 = load32_le(b35);
   uint32_t xk34 = u34;
   uint32_t ti35 = _t[35U];
@@ -634,14 +626,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb35
     +
       ((va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35)
-      << (uint32_t)23U
-      | (va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35) >> (uint32_t)9U);
+      << 23U
+      | (va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35) >> 9U);
   abcd[1U] = v34;
   uint32_t va35 = abcd[0U];
   uint32_t vb36 = abcd[1U];
   uint32_t vc36 = abcd[2U];
   uint32_t vd36 = abcd[3U];
-  uint8_t *b36 = x + (uint32_t)4U;
+  uint8_t *b36 = x + 4U;
   uint32_t u35 = load32_le(b36);
   uint32_t xk35 = u35;
   uint32_t ti36 = _t[36U];
@@ -650,14 +642,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb36
     +
       ((va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36)
-      << (uint32_t)4U
-      | (va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36) >> (uint32_t)28U);
+      << 4U
+      | (va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36) >> 28U);
   abcd[0U] = v35;
   uint32_t va36 = abcd[3U];
   uint32_t vb37 = abcd[0U];
   uint32_t vc37 = abcd[1U];
   uint32_t vd37 = abcd[2U];
-  uint8_t *b37 = x + (uint32_t)16U;
+  uint8_t *b37 = x + 16U;
   uint32_t u36 = load32_le(b37);
   uint32_t xk36 = u36;
   uint32_t ti37 = _t[37U];
@@ -666,14 +658,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb37
     +
       ((va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37)
-      << (uint32_t)11U
-      | (va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37) >> (uint32_t)21U);
+      << 11U
+      | (va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37) >> 21U);
   abcd[3U] = v36;
   uint32_t va37 = abcd[2U];
   uint32_t vb38 = abcd[3U];
   uint32_t vc38 = abcd[0U];
   uint32_t vd38 = abcd[1U];
-  uint8_t *b38 = x + (uint32_t)28U;
+  uint8_t *b38 = x + 28U;
   uint32_t u37 = load32_le(b38);
   uint32_t xk37 = u37;
   uint32_t ti38 = _t[38U];
@@ -682,14 +674,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb38
     +
       ((va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38)
-      << (uint32_t)16U
-      | (va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38) >> (uint32_t)16U);
+      << 16U
+      | (va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38) >> 16U);
   abcd[2U] = v37;
   uint32_t va38 = abcd[1U];
   uint32_t vb39 = abcd[2U];
   uint32_t vc39 = abcd[3U];
   uint32_t vd39 = abcd[0U];
-  uint8_t *b39 = x + (uint32_t)40U;
+  uint8_t *b39 = x + 40U;
   uint32_t u38 = load32_le(b39);
   uint32_t xk38 = u38;
   uint32_t ti39 = _t[39U];
@@ -698,14 +690,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb39
     +
       ((va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39)
-      << (uint32_t)23U
-      | (va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39) >> (uint32_t)9U);
+      << 23U
+      | (va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39) >> 9U);
   abcd[1U] = v38;
   uint32_t va39 = abcd[0U];
   uint32_t vb40 = abcd[1U];
   uint32_t vc40 = abcd[2U];
   uint32_t vd40 = abcd[3U];
-  uint8_t *b40 = x + (uint32_t)52U;
+  uint8_t *b40 = x + 52U;
   uint32_t u39 = load32_le(b40);
   uint32_t xk39 = u39;
   uint32_t ti40 = _t[40U];
@@ -714,8 +706,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb40
     +
       ((va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40)
-      << (uint32_t)4U
-      | (va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40) >> (uint32_t)28U);
+      << 4U
+      | (va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40) >> 28U);
   abcd[0U] = v39;
   uint32_t va40 = abcd[3U];
   uint32_t vb41 = abcd[0U];
@@ -730,14 +722,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb41
     +
       ((va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41)
-      << (uint32_t)11U
-      | (va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41) >> (uint32_t)21U);
+      << 11U
+      | (va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41) >> 21U);
   abcd[3U] = v40;
   uint32_t va41 = abcd[2U];
   uint32_t vb42 = abcd[3U];
   uint32_t vc42 = abcd[0U];
   uint32_t vd42 = abcd[1U];
-  uint8_t *b42 = x + (uint32_t)12U;
+  uint8_t *b42 = x + 12U;
   uint32_t u41 = load32_le(b42);
   uint32_t xk41 = u41;
   uint32_t ti42 = _t[42U];
@@ -746,14 +738,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb42
     +
       ((va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42)
-      << (uint32_t)16U
-      | (va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42) >> (uint32_t)16U);
+      << 16U
+      | (va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42) >> 16U);
   abcd[2U] = v41;
   uint32_t va42 = abcd[1U];
   uint32_t vb43 = abcd[2U];
   uint32_t vc43 = abcd[3U];
   uint32_t vd43 = abcd[0U];
-  uint8_t *b43 = x + (uint32_t)24U;
+  uint8_t *b43 = x + 24U;
   uint32_t u42 = load32_le(b43);
   uint32_t xk42 = u42;
   uint32_t ti43 = _t[43U];
@@ -762,14 +754,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb43
     +
       ((va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43)
-      << (uint32_t)23U
-      | (va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43) >> (uint32_t)9U);
+      << 23U
+      | (va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43) >> 9U);
   abcd[1U] = v42;
   uint32_t va43 = abcd[0U];
   uint32_t vb44 = abcd[1U];
   uint32_t vc44 = abcd[2U];
   uint32_t vd44 = abcd[3U];
-  uint8_t *b44 = x + (uint32_t)36U;
+  uint8_t *b44 = x + 36U;
   uint32_t u43 = load32_le(b44);
   uint32_t xk43 = u43;
   uint32_t ti44 = _t[44U];
@@ -778,14 +770,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb44
     +
       ((va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44)
-      << (uint32_t)4U
-      | (va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44) >> (uint32_t)28U);
+      << 4U
+      | (va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44) >> 28U);
   abcd[0U] = v43;
   uint32_t va44 = abcd[3U];
   uint32_t vb45 = abcd[0U];
   uint32_t vc45 = abcd[1U];
   uint32_t vd45 = abcd[2U];
-  uint8_t *b45 = x + (uint32_t)48U;
+  uint8_t *b45 = x + 48U;
   uint32_t u44 = load32_le(b45);
   uint32_t xk44 = u44;
   uint32_t ti45 = _t[45U];
@@ -794,14 +786,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb45
     +
       ((va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45)
-      << (uint32_t)11U
-      | (va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45) >> (uint32_t)21U);
+      << 11U
+      | (va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45) >> 21U);
   abcd[3U] = v44;
   uint32_t va45 = abcd[2U];
   uint32_t vb46 = abcd[3U];
   uint32_t vc46 = abcd[0U];
   uint32_t vd46 = abcd[1U];
-  uint8_t *b46 = x + (uint32_t)60U;
+  uint8_t *b46 = x + 60U;
   uint32_t u45 = load32_le(b46);
   uint32_t xk45 = u45;
   uint32_t ti46 = _t[46U];
@@ -810,14 +802,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb46
     +
       ((va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46)
-      << (uint32_t)16U
-      | (va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46) >> (uint32_t)16U);
+      << 16U
+      | (va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46) >> 16U);
   abcd[2U] = v45;
   uint32_t va46 = abcd[1U];
   uint32_t vb47 = abcd[2U];
   uint32_t vc47 = abcd[3U];
   uint32_t vd47 = abcd[0U];
-  uint8_t *b47 = x + (uint32_t)8U;
+  uint8_t *b47 = x + 8U;
   uint32_t u46 = load32_le(b47);
   uint32_t xk46 = u46;
   uint32_t ti47 = _t[47U];
@@ -826,8 +818,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb47
     +
       ((va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47)
-      << (uint32_t)23U
-      | (va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47) >> (uint32_t)9U);
+      << 23U
+      | (va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47) >> 9U);
   abcd[1U] = v46;
   uint32_t va47 = abcd[0U];
   uint32_t vb48 = abcd[1U];
@@ -842,14 +834,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb48
     +
       ((va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48)
-      << (uint32_t)6U
-      | (va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48) >> (uint32_t)26U);
+      << 6U
+      | (va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48) >> 26U);
   abcd[0U] = v47;
   uint32_t va48 = abcd[3U];
   uint32_t vb49 = abcd[0U];
   uint32_t vc49 = abcd[1U];
   uint32_t vd49 = abcd[2U];
-  uint8_t *b49 = x + (uint32_t)28U;
+  uint8_t *b49 = x + 28U;
   uint32_t u48 = load32_le(b49);
   uint32_t xk48 = u48;
   uint32_t ti49 = _t[49U];
@@ -858,14 +850,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb49
     +
       ((va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49)
-      << (uint32_t)10U
-      | (va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49) >> (uint32_t)22U);
+      << 10U
+      | (va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49) >> 22U);
   abcd[3U] = v48;
   uint32_t va49 = abcd[2U];
   uint32_t vb50 = abcd[3U];
   uint32_t vc50 = abcd[0U];
   uint32_t vd50 = abcd[1U];
-  uint8_t *b50 = x + (uint32_t)56U;
+  uint8_t *b50 = x + 56U;
   uint32_t u49 = load32_le(b50);
   uint32_t xk49 = u49;
   uint32_t ti50 = _t[50U];
@@ -874,14 +866,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb50
     +
       ((va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50)
-      << (uint32_t)15U
-      | (va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50) >> (uint32_t)17U);
+      << 15U
+      | (va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50) >> 17U);
   abcd[2U] = v49;
   uint32_t va50 = abcd[1U];
   uint32_t vb51 = abcd[2U];
   uint32_t vc51 = abcd[3U];
   uint32_t vd51 = abcd[0U];
-  uint8_t *b51 = x + (uint32_t)20U;
+  uint8_t *b51 = x + 20U;
   uint32_t u50 = load32_le(b51);
   uint32_t xk50 = u50;
   uint32_t ti51 = _t[51U];
@@ -890,14 +882,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb51
     +
       ((va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51)
-      << (uint32_t)21U
-      | (va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51) >> (uint32_t)11U);
+      << 21U
+      | (va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51) >> 11U);
   abcd[1U] = v50;
   uint32_t va51 = abcd[0U];
   uint32_t vb52 = abcd[1U];
   uint32_t vc52 = abcd[2U];
   uint32_t vd52 = abcd[3U];
-  uint8_t *b52 = x + (uint32_t)48U;
+  uint8_t *b52 = x + 48U;
   uint32_t u51 = load32_le(b52);
   uint32_t xk51 = u51;
   uint32_t ti52 = _t[52U];
@@ -906,14 +898,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb52
     +
       ((va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52)
-      << (uint32_t)6U
-      | (va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52) >> (uint32_t)26U);
+      << 6U
+      | (va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52) >> 26U);
   abcd[0U] = v51;
   uint32_t va52 = abcd[3U];
   uint32_t vb53 = abcd[0U];
   uint32_t vc53 = abcd[1U];
   uint32_t vd53 = abcd[2U];
-  uint8_t *b53 = x + (uint32_t)12U;
+  uint8_t *b53 = x + 12U;
   uint32_t u52 = load32_le(b53);
   uint32_t xk52 = u52;
   uint32_t ti53 = _t[53U];
@@ -922,14 +914,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb53
     +
       ((va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53)
-      << (uint32_t)10U
-      | (va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53) >> (uint32_t)22U);
+      << 10U
+      | (va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53) >> 22U);
   abcd[3U] = v52;
   uint32_t va53 = abcd[2U];
   uint32_t vb54 = abcd[3U];
   uint32_t vc54 = abcd[0U];
   uint32_t vd54 = abcd[1U];
-  uint8_t *b54 = x + (uint32_t)40U;
+  uint8_t *b54 = x + 40U;
   uint32_t u53 = load32_le(b54);
   uint32_t xk53 = u53;
   uint32_t ti54 = _t[54U];
@@ -938,14 +930,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb54
     +
       ((va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54)
-      << (uint32_t)15U
-      | (va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54) >> (uint32_t)17U);
+      << 15U
+      | (va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54) >> 17U);
   abcd[2U] = v53;
   uint32_t va54 = abcd[1U];
   uint32_t vb55 = abcd[2U];
   uint32_t vc55 = abcd[3U];
   uint32_t vd55 = abcd[0U];
-  uint8_t *b55 = x + (uint32_t)4U;
+  uint8_t *b55 = x + 4U;
   uint32_t u54 = load32_le(b55);
   uint32_t xk54 = u54;
   uint32_t ti55 = _t[55U];
@@ -954,14 +946,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb55
     +
       ((va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55)
-      << (uint32_t)21U
-      | (va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55) >> (uint32_t)11U);
+      << 21U
+      | (va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55) >> 11U);
   abcd[1U] = v54;
   uint32_t va55 = abcd[0U];
   uint32_t vb56 = abcd[1U];
   uint32_t vc56 = abcd[2U];
   uint32_t vd56 = abcd[3U];
-  uint8_t *b56 = x + (uint32_t)32U;
+  uint8_t *b56 = x + 32U;
   uint32_t u55 = load32_le(b56);
   uint32_t xk55 = u55;
   uint32_t ti56 = _t[56U];
@@ -970,14 +962,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb56
     +
       ((va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56)
-      << (uint32_t)6U
-      | (va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56) >> (uint32_t)26U);
+      << 6U
+      | (va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56) >> 26U);
   abcd[0U] = v55;
   uint32_t va56 = abcd[3U];
   uint32_t vb57 = abcd[0U];
   uint32_t vc57 = abcd[1U];
   uint32_t vd57 = abcd[2U];
-  uint8_t *b57 = x + (uint32_t)60U;
+  uint8_t *b57 = x + 60U;
   uint32_t u56 = load32_le(b57);
   uint32_t xk56 = u56;
   uint32_t ti57 = _t[57U];
@@ -986,14 +978,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb57
     +
       ((va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57)
-      << (uint32_t)10U
-      | (va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57) >> (uint32_t)22U);
+      << 10U
+      | (va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57) >> 22U);
   abcd[3U] = v56;
   uint32_t va57 = abcd[2U];
   uint32_t vb58 = abcd[3U];
   uint32_t vc58 = abcd[0U];
   uint32_t vd58 = abcd[1U];
-  uint8_t *b58 = x + (uint32_t)24U;
+  uint8_t *b58 = x + 24U;
   uint32_t u57 = load32_le(b58);
   uint32_t xk57 = u57;
   uint32_t ti58 = _t[58U];
@@ -1002,14 +994,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb58
     +
       ((va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58)
-      << (uint32_t)15U
-      | (va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58) >> (uint32_t)17U);
+      << 15U
+      | (va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58) >> 17U);
   abcd[2U] = v57;
   uint32_t va58 = abcd[1U];
   uint32_t vb59 = abcd[2U];
   uint32_t vc59 = abcd[3U];
   uint32_t vd59 = abcd[0U];
-  uint8_t *b59 = x + (uint32_t)52U;
+  uint8_t *b59 = x + 52U;
   uint32_t u58 = load32_le(b59);
   uint32_t xk58 = u58;
   uint32_t ti59 = _t[59U];
@@ -1018,14 +1010,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb59
     +
       ((va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59)
-      << (uint32_t)21U
-      | (va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59) >> (uint32_t)11U);
+      << 21U
+      | (va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59) >> 11U);
   abcd[1U] = v58;
   uint32_t va59 = abcd[0U];
   uint32_t vb60 = abcd[1U];
   uint32_t vc60 = abcd[2U];
   uint32_t vd60 = abcd[3U];
-  uint8_t *b60 = x + (uint32_t)16U;
+  uint8_t *b60 = x + 16U;
   uint32_t u59 = load32_le(b60);
   uint32_t xk59 = u59;
   uint32_t ti60 = _t[60U];
@@ -1034,14 +1026,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb60
     +
       ((va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60)
-      << (uint32_t)6U
-      | (va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60) >> (uint32_t)26U);
+      << 6U
+      | (va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60) >> 26U);
   abcd[0U] = v59;
   uint32_t va60 = abcd[3U];
   uint32_t vb61 = abcd[0U];
   uint32_t vc61 = abcd[1U];
   uint32_t vd61 = abcd[2U];
-  uint8_t *b61 = x + (uint32_t)44U;
+  uint8_t *b61 = x + 44U;
   uint32_t u60 = load32_le(b61);
   uint32_t xk60 = u60;
   uint32_t ti61 = _t[61U];
@@ -1050,14 +1042,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb61
     +
       ((va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61)
-      << (uint32_t)10U
-      | (va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61) >> (uint32_t)22U);
+      << 10U
+      | (va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61) >> 22U);
   abcd[3U] = v60;
   uint32_t va61 = abcd[2U];
   uint32_t vb62 = abcd[3U];
   uint32_t vc62 = abcd[0U];
   uint32_t vd62 = abcd[1U];
-  uint8_t *b62 = x + (uint32_t)8U;
+  uint8_t *b62 = x + 8U;
   uint32_t u61 = load32_le(b62);
   uint32_t xk61 = u61;
   uint32_t ti62 = _t[62U];
@@ -1066,14 +1058,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb62
     +
       ((va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62)
-      << (uint32_t)15U
-      | (va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62) >> (uint32_t)17U);
+      << 15U
+      | (va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62) >> 17U);
   abcd[2U] = v61;
   uint32_t va62 = abcd[1U];
   uint32_t vb = abcd[2U];
   uint32_t vc = abcd[3U];
   uint32_t vd = abcd[0U];
-  uint8_t *b63 = x + (uint32_t)36U;
+  uint8_t *b63 = x + 36U;
   uint32_t u62 = load32_le(b63);
   uint32_t xk62 = u62;
   uint32_t ti = _t[63U];
@@ -1082,8 +1074,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb
     +
       ((va62 + (vc ^ (vb | ~vd)) + xk62 + ti)
-      << (uint32_t)21U
-      | (va62 + (vc ^ (vb | ~vd)) + xk62 + ti) >> (uint32_t)11U);
+      << 21U
+      | (va62 + (vc ^ (vb | ~vd)) + xk62 + ti) >> 11U);
   abcd[1U] = v62;
   uint32_t a = abcd[0U];
   uint32_t b = abcd[1U];
@@ -1098,42 +1090,26 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
 static void legacy_pad(uint64_t len, uint8_t *dst)
 {
   uint8_t *dst1 = dst;
-  dst1[0U] = (uint8_t)0x80U;
-  uint8_t *dst2 = dst + (uint32_t)1U;
-  for
-  (uint32_t
-    i = (uint32_t)0U;
-    i
-    < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
-    i++)
+  dst1[0U] = 0x80U;
+  uint8_t *dst2 = dst + 1U;
+  for (uint32_t i = 0U; i < (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U; i++)
   {
-    dst2[i] = (uint8_t)0U;
+    dst2[i] = 0U;
   }
-  uint8_t
-  *dst3 =
-    dst
-    +
-      (uint32_t)1U
-      +
-        ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U)))
-        % (uint32_t)64U;
-  store64_le(dst3, len << (uint32_t)3U);
+  uint8_t *dst3 = dst + 1U + (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U;
+  store64_le(dst3, len << 3U);
 }
 
 void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst)
 {
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store32_le(dst + i * (uint32_t)4U, s[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(dst + i * 4U, s[i]););
 }
 
 void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
 {
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
-    uint32_t sz = (uint32_t)64U;
+    uint32_t sz = 64U;
     uint8_t *block = blocks + sz * i;
     legacy_update(s, block);
   }
@@ -1147,20 +1123,14 @@ Hacl_Hash_MD5_legacy_update_last(
   uint32_t input_len
 )
 {
-  uint32_t blocks_n = input_len / (uint32_t)64U;
-  uint32_t blocks_len = blocks_n * (uint32_t)64U;
+  uint32_t blocks_n = input_len / 64U;
+  uint32_t blocks_len = blocks_n * 64U;
   uint8_t *blocks = input;
   uint32_t rest_len = input_len - blocks_len;
   uint8_t *rest = input + blocks_len;
   Hacl_Hash_MD5_legacy_update_multi(s, blocks, blocks_n);
   uint64_t total_input_len = prev_len + (uint64_t)input_len;
-  uint32_t
-  pad_len =
-    (uint32_t)1U
-    +
-      ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U)))
-      % (uint32_t)64U
-    + (uint32_t)8U;
+  uint32_t pad_len = 1U + (128U - (9U + (uint32_t)(total_input_len % (uint64_t)64U))) % 64U + 8U;
   uint32_t tmp_len = rest_len + pad_len;
   uint8_t tmp_twoblocks[128U] = { 0U };
   uint8_t *tmp = tmp_twoblocks;
@@ -1168,25 +1138,23 @@ Hacl_Hash_MD5_legacy_update_last(
   uint8_t *tmp_pad = tmp + rest_len;
   memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
   legacy_pad(total_input_len, tmp_pad);
-  Hacl_Hash_MD5_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
+  Hacl_Hash_MD5_legacy_update_multi(s, tmp, tmp_len / 64U);
 }
 
 void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
 {
-  uint32_t
-  s[4U] =
-    { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U };
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t s[4U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U };
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -1202,10 +1170,10 @@ void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
 
 Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_create_in(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
@@ -1220,7 +1188,7 @@ void Hacl_Streaming_MD5_legacy_init(Hacl_Streaming_MD_state_32 *s)
   uint32_t *block_state = scrut.block_state;
   Hacl_Hash_Core_MD5_legacy_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -1232,33 +1200,33 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
 {
   Hacl_Streaming_MD_state_32 s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+  if ((uint64_t)len > 2305843009213693951ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (len <= 64U - sz)
   {
     Hacl_Streaming_MD_state_32 s1 = *p;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -1273,40 +1241,40 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_MD_state_32 s1 = *p;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)64U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
-    Hacl_Hash_MD5_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
+    Hacl_Hash_MD5_legacy_update_multi(block_state1, data1, data1_len / 64U);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
     *p
@@ -1321,7 +1289,7 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
+    uint32_t diff = 64U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_MD_state_32 s1 = *p;
@@ -1329,13 +1297,13 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -1354,39 +1322,33 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
-    Hacl_Hash_MD5_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
+    Hacl_Hash_MD5_legacy_update_multi(block_state1, data11, data1_len / 64U);
     uint8_t *dst = buf;
     memcpy(dst, data21, data2_len * sizeof (uint8_t));
     *p
@@ -1409,29 +1371,29 @@ void Hacl_Streaming_MD5_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *ds
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[4U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)4U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 4U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_Hash_MD5_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
+  Hacl_Hash_MD5_legacy_update_multi(tmp_block_state, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Hacl_Hash_MD5_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
   Hacl_Hash_Core_MD5_legacy_finish(tmp_block_state, dst);
@@ -1453,10 +1415,10 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_sta
   uint32_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
-  memcpy(block_state, block_state0, (uint32_t)4U * sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
+  memcpy(block_state, block_state0, 4U * sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_32
diff --git a/src/msvc/Hacl_Hash_SHA1.c b/src/msvc/Hacl_Hash_SHA1.c
index 5ecb3c0b..61509182 100644
--- a/src/msvc/Hacl_Hash_SHA1.c
+++ b/src/msvc/Hacl_Hash_SHA1.c
@@ -25,16 +25,11 @@
 
 #include "internal/Hacl_Hash_SHA1.h"
 
-static uint32_t
-_h0[5U] =
-  {
-    (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-    (uint32_t)0xc3d2e1f0U
-  };
+static uint32_t _h0[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
 
 void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s)
 {
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, s[i] = _h0[i];);
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i] = _h0[i];);
 }
 
 static void legacy_update(uint32_t *h, uint8_t *l)
@@ -45,29 +40,26 @@ static void legacy_update(uint32_t *h, uint8_t *l)
   uint32_t hd = h[3U];
   uint32_t he = h[4U];
   uint32_t _w[80U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+  for (uint32_t i = 0U; i < 80U; i++)
   {
     uint32_t v;
-    if (i < (uint32_t)16U)
+    if (i < 16U)
     {
-      uint8_t *b = l + i * (uint32_t)4U;
+      uint8_t *b = l + i * 4U;
       uint32_t u = load32_be(b);
       v = u;
     }
     else
     {
-      uint32_t wmit3 = _w[i - (uint32_t)3U];
-      uint32_t wmit8 = _w[i - (uint32_t)8U];
-      uint32_t wmit14 = _w[i - (uint32_t)14U];
-      uint32_t wmit16 = _w[i - (uint32_t)16U];
-      v =
-        (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16)))
-        << (uint32_t)1U
-        | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> (uint32_t)31U;
+      uint32_t wmit3 = _w[i - 3U];
+      uint32_t wmit8 = _w[i - 8U];
+      uint32_t wmit14 = _w[i - 14U];
+      uint32_t wmit16 = _w[i - 16U];
+      v = (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) << 1U | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> 31U;
     }
     _w[i] = v;
   }
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+  for (uint32_t i = 0U; i < 80U; i++)
   {
     uint32_t _a = h[0U];
     uint32_t _b = h[1U];
@@ -76,11 +68,11 @@ static void legacy_update(uint32_t *h, uint8_t *l)
     uint32_t _e = h[4U];
     uint32_t wmit = _w[i];
     uint32_t ite0;
-    if (i < (uint32_t)20U)
+    if (i < 20U)
     {
       ite0 = (_b & _c) ^ (~_b & _d);
     }
-    else if ((uint32_t)39U < i && i < (uint32_t)60U)
+    else if (39U < i && i < 60U)
     {
       ite0 = (_b & _c) ^ ((_b & _d) ^ (_c & _d));
     }
@@ -89,32 +81,32 @@ static void legacy_update(uint32_t *h, uint8_t *l)
       ite0 = _b ^ (_c ^ _d);
     }
     uint32_t ite;
-    if (i < (uint32_t)20U)
+    if (i < 20U)
     {
-      ite = (uint32_t)0x5a827999U;
+      ite = 0x5a827999U;
     }
-    else if (i < (uint32_t)40U)
+    else if (i < 40U)
     {
-      ite = (uint32_t)0x6ed9eba1U;
+      ite = 0x6ed9eba1U;
     }
-    else if (i < (uint32_t)60U)
+    else if (i < 60U)
     {
-      ite = (uint32_t)0x8f1bbcdcU;
+      ite = 0x8f1bbcdcU;
     }
     else
     {
-      ite = (uint32_t)0xca62c1d6U;
+      ite = 0xca62c1d6U;
     }
-    uint32_t _T = (_a << (uint32_t)5U | _a >> (uint32_t)27U) + ite0 + _e + ite + wmit;
+    uint32_t _T = (_a << 5U | _a >> 27U) + ite0 + _e + ite + wmit;
     h[0U] = _T;
     h[1U] = _a;
-    h[2U] = _b << (uint32_t)30U | _b >> (uint32_t)2U;
+    h[2U] = _b << 30U | _b >> 2U;
     h[3U] = _c;
     h[4U] = _d;
   }
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+  for (uint32_t i = 0U; i < 80U; i++)
   {
-    _w[i] = (uint32_t)0U;
+    _w[i] = 0U;
   }
   uint32_t sta = h[0U];
   uint32_t stb = h[1U];
@@ -131,42 +123,26 @@ static void legacy_update(uint32_t *h, uint8_t *l)
 static void legacy_pad(uint64_t len, uint8_t *dst)
 {
   uint8_t *dst1 = dst;
-  dst1[0U] = (uint8_t)0x80U;
-  uint8_t *dst2 = dst + (uint32_t)1U;
-  for
-  (uint32_t
-    i = (uint32_t)0U;
-    i
-    < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
-    i++)
+  dst1[0U] = 0x80U;
+  uint8_t *dst2 = dst + 1U;
+  for (uint32_t i = 0U; i < (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U; i++)
   {
-    dst2[i] = (uint8_t)0U;
+    dst2[i] = 0U;
   }
-  uint8_t
-  *dst3 =
-    dst
-    +
-      (uint32_t)1U
-      +
-        ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U)))
-        % (uint32_t)64U;
-  store64_be(dst3, len << (uint32_t)3U);
+  uint8_t *dst3 = dst + 1U + (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U;
+  store64_be(dst3, len << 3U);
 }
 
 void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst)
 {
-  KRML_MAYBE_FOR5(i,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
-    store32_be(dst + i * (uint32_t)4U, s[i]););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, store32_be(dst + i * 4U, s[i]););
 }
 
 void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
 {
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
-    uint32_t sz = (uint32_t)64U;
+    uint32_t sz = 64U;
     uint8_t *block = blocks + sz * i;
     legacy_update(s, block);
   }
@@ -180,20 +156,14 @@ Hacl_Hash_SHA1_legacy_update_last(
   uint32_t input_len
 )
 {
-  uint32_t blocks_n = input_len / (uint32_t)64U;
-  uint32_t blocks_len = blocks_n * (uint32_t)64U;
+  uint32_t blocks_n = input_len / 64U;
+  uint32_t blocks_len = blocks_n * 64U;
   uint8_t *blocks = input;
   uint32_t rest_len = input_len - blocks_len;
   uint8_t *rest = input + blocks_len;
   Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n);
   uint64_t total_input_len = prev_len + (uint64_t)input_len;
-  uint32_t
-  pad_len =
-    (uint32_t)1U
-    +
-      ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U)))
-      % (uint32_t)64U
-    + (uint32_t)8U;
+  uint32_t pad_len = 1U + (128U - (9U + (uint32_t)(total_input_len % (uint64_t)64U))) % 64U + 8U;
   uint32_t tmp_len = rest_len + pad_len;
   uint8_t tmp_twoblocks[128U] = { 0U };
   uint8_t *tmp = tmp_twoblocks;
@@ -201,28 +171,23 @@ Hacl_Hash_SHA1_legacy_update_last(
   uint8_t *tmp_pad = tmp + rest_len;
   memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
   legacy_pad(total_input_len, tmp_pad);
-  Hacl_Hash_SHA1_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
+  Hacl_Hash_SHA1_legacy_update_multi(s, tmp, tmp_len / 64U);
 }
 
 void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
 {
-  uint32_t
-  s[5U] =
-    {
-      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-      (uint32_t)0xc3d2e1f0U
-    };
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -238,10 +203,10 @@ void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst
 
 Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_create_in(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
@@ -256,7 +221,7 @@ void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s)
   uint32_t *block_state = scrut.block_state;
   Hacl_Hash_Core_SHA1_legacy_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -268,33 +233,33 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
 {
   Hacl_Streaming_MD_state_32 s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+  if ((uint64_t)len > 2305843009213693951ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (len <= 64U - sz)
   {
     Hacl_Streaming_MD_state_32 s1 = *p;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -309,40 +274,40 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_MD_state_32 s1 = *p;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)64U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
-    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
+    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data1, data1_len / 64U);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
     *p
@@ -357,7 +322,7 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
+    uint32_t diff = 64U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_MD_state_32 s1 = *p;
@@ -365,13 +330,13 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -390,39 +355,33 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
-    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
+    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data11, data1_len / 64U);
     uint8_t *dst = buf;
     memcpy(dst, data21, data2_len * sizeof (uint8_t));
     *p
@@ -445,29 +404,29 @@ void Hacl_Streaming_SHA1_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *d
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[5U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)5U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 5U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_Hash_SHA1_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
+  Hacl_Hash_SHA1_legacy_update_multi(tmp_block_state, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Hacl_Hash_SHA1_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
   Hacl_Hash_Core_SHA1_legacy_finish(tmp_block_state, dst);
@@ -489,10 +448,10 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_st
   uint32_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
-  memcpy(block_state, block_state0, (uint32_t)5U * sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
+  memcpy(block_state, block_state0, 5U * sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_32
diff --git a/src/msvc/Hacl_Hash_SHA2.c b/src/msvc/Hacl_Hash_SHA2.c
index c93c3616..934ae3e2 100644
--- a/src/msvc/Hacl_Hash_SHA2.c
+++ b/src/msvc/Hacl_Hash_SHA2.c
@@ -30,9 +30,9 @@
 void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = hash;
     uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
     os[i] = x;);
@@ -42,49 +42,49 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
 {
   uint32_t hash_old[8U] = { 0U };
   uint32_t ws[16U] = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(hash_old, hash, 8U * sizeof (uint32_t));
   uint8_t *b10 = b;
   uint32_t u = load32_be(b10);
   ws[0U] = u;
-  uint32_t u0 = load32_be(b10 + (uint32_t)4U);
+  uint32_t u0 = load32_be(b10 + 4U);
   ws[1U] = u0;
-  uint32_t u1 = load32_be(b10 + (uint32_t)8U);
+  uint32_t u1 = load32_be(b10 + 8U);
   ws[2U] = u1;
-  uint32_t u2 = load32_be(b10 + (uint32_t)12U);
+  uint32_t u2 = load32_be(b10 + 12U);
   ws[3U] = u2;
-  uint32_t u3 = load32_be(b10 + (uint32_t)16U);
+  uint32_t u3 = load32_be(b10 + 16U);
   ws[4U] = u3;
-  uint32_t u4 = load32_be(b10 + (uint32_t)20U);
+  uint32_t u4 = load32_be(b10 + 20U);
   ws[5U] = u4;
-  uint32_t u5 = load32_be(b10 + (uint32_t)24U);
+  uint32_t u5 = load32_be(b10 + 24U);
   ws[6U] = u5;
-  uint32_t u6 = load32_be(b10 + (uint32_t)28U);
+  uint32_t u6 = load32_be(b10 + 28U);
   ws[7U] = u6;
-  uint32_t u7 = load32_be(b10 + (uint32_t)32U);
+  uint32_t u7 = load32_be(b10 + 32U);
   ws[8U] = u7;
-  uint32_t u8 = load32_be(b10 + (uint32_t)36U);
+  uint32_t u8 = load32_be(b10 + 36U);
   ws[9U] = u8;
-  uint32_t u9 = load32_be(b10 + (uint32_t)40U);
+  uint32_t u9 = load32_be(b10 + 40U);
   ws[10U] = u9;
-  uint32_t u10 = load32_be(b10 + (uint32_t)44U);
+  uint32_t u10 = load32_be(b10 + 44U);
   ws[11U] = u10;
-  uint32_t u11 = load32_be(b10 + (uint32_t)48U);
+  uint32_t u11 = load32_be(b10 + 48U);
   ws[12U] = u11;
-  uint32_t u12 = load32_be(b10 + (uint32_t)52U);
+  uint32_t u12 = load32_be(b10 + 52U);
   ws[13U] = u12;
-  uint32_t u13 = load32_be(b10 + (uint32_t)56U);
+  uint32_t u13 = load32_be(b10 + 56U);
   ws[14U] = u13;
-  uint32_t u14 = load32_be(b10 + (uint32_t)60U);
+  uint32_t u14 = load32_be(b10 + 60U);
   ws[15U] = u14;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[16U * i0 + i];
       uint32_t ws_t = ws[i];
       uint32_t a0 = hash[0U];
       uint32_t b0 = hash[1U];
@@ -98,20 +98,13 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
       uint32_t
       t1 =
         h02
-        +
-          ((e0 << (uint32_t)26U | e0 >> (uint32_t)6U)
-          ^
-            ((e0 << (uint32_t)21U | e0 >> (uint32_t)11U)
-            ^ (e0 << (uint32_t)7U | e0 >> (uint32_t)25U)))
+        + ((e0 << 26U | e0 >> 6U) ^ ((e0 << 21U | e0 >> 11U) ^ (e0 << 7U | e0 >> 25U)))
         + ((e0 & f0) ^ (~e0 & g0))
         + k_e_t
         + ws_t;
       uint32_t
       t2 =
-        ((a0 << (uint32_t)30U | a0 >> (uint32_t)2U)
-        ^
-          ((a0 << (uint32_t)19U | a0 >> (uint32_t)13U)
-          ^ (a0 << (uint32_t)10U | a0 >> (uint32_t)22U)))
+        ((a0 << 30U | a0 >> 2U) ^ ((a0 << 19U | a0 >> 13U) ^ (a0 << 10U | a0 >> 22U)))
         + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
       uint32_t a1 = t1 + t2;
       uint32_t b1 = a0;
@@ -129,30 +122,24 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         uint32_t t16 = ws[i];
-        uint32_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        uint32_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        uint32_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
-        uint32_t
-        s1 =
-          (t2 << (uint32_t)15U | t2 >> (uint32_t)17U)
-          ^ ((t2 << (uint32_t)13U | t2 >> (uint32_t)19U) ^ t2 >> (uint32_t)10U);
-        uint32_t
-        s0 =
-          (t15 << (uint32_t)25U | t15 >> (uint32_t)7U)
-          ^ ((t15 << (uint32_t)14U | t15 >> (uint32_t)18U) ^ t15 >> (uint32_t)3U);
+        uint32_t t15 = ws[(i + 1U) % 16U];
+        uint32_t t7 = ws[(i + 9U) % 16U];
+        uint32_t t2 = ws[(i + 14U) % 16U];
+        uint32_t s1 = (t2 << 15U | t2 >> 17U) ^ ((t2 << 13U | t2 >> 19U) ^ t2 >> 10U);
+        uint32_t s0 = (t15 << 25U | t15 >> 7U) ^ ((t15 << 14U | t15 >> 18U) ^ t15 >> 3U);
         ws[i] = s1 + t7 + s0 + t16;);
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = hash;
     uint32_t x = hash[i] + hash_old[i];
     os[i] = x;);
@@ -160,11 +147,11 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
 
 void Hacl_SHA2_Scalar32_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b0 = b;
-    uint8_t *mb = b0 + i * (uint32_t)64U;
+    uint8_t *mb = b0 + i * 64U;
     sha256_update(mb, st);
   }
 }
@@ -178,25 +165,25 @@ Hacl_SHA2_Scalar32_sha256_update_last(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[128U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b0 = b;
   memcpy(last, b0, len * sizeof (uint8_t));
-  last[len] = (uint8_t)0x80U;
-  memcpy(last + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last[len] = 0x80U;
+  memcpy(last + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)64U;
+  uint8_t *last10 = last + 64U;
   uint8_t *l0 = last00;
   uint8_t *l1 = last10;
   uint8_t *lb0 = l0;
@@ -204,7 +191,7 @@ Hacl_SHA2_Scalar32_sha256_update_last(
   uint8_t *last0 = lb0;
   uint8_t *last1 = lb1;
   sha256_update(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha256_update(last1, hash);
     return;
@@ -214,20 +201,16 @@ Hacl_SHA2_Scalar32_sha256_update_last(
 void Hacl_SHA2_Scalar32_sha256_finish(uint32_t *st, uint8_t *h)
 {
   uint8_t hbuf[32U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_be(hbuf + i * (uint32_t)4U, st[i]););
-  memcpy(h, hbuf, (uint32_t)32U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(hbuf + i * 4U, st[i]););
+  memcpy(h, hbuf, 32U * sizeof (uint8_t));
 }
 
 void Hacl_SHA2_Scalar32_sha224_init(uint32_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = hash;
     uint32_t x = Hacl_Impl_SHA2_Generic_h224[i];
     os[i] = x;);
@@ -247,20 +230,16 @@ Hacl_SHA2_Scalar32_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b,
 void Hacl_SHA2_Scalar32_sha224_finish(uint32_t *st, uint8_t *h)
 {
   uint8_t hbuf[32U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_be(hbuf + i * (uint32_t)4U, st[i]););
-  memcpy(h, hbuf, (uint32_t)28U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(hbuf + i * 4U, st[i]););
+  memcpy(h, hbuf, 28U * sizeof (uint8_t));
 }
 
 void Hacl_SHA2_Scalar32_sha512_init(uint64_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = hash;
     uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
     os[i] = x;);
@@ -270,49 +249,49 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
 {
   uint64_t hash_old[8U] = { 0U };
   uint64_t ws[16U] = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(hash_old, hash, 8U * sizeof (uint64_t));
   uint8_t *b10 = b;
   uint64_t u = load64_be(b10);
   ws[0U] = u;
-  uint64_t u0 = load64_be(b10 + (uint32_t)8U);
+  uint64_t u0 = load64_be(b10 + 8U);
   ws[1U] = u0;
-  uint64_t u1 = load64_be(b10 + (uint32_t)16U);
+  uint64_t u1 = load64_be(b10 + 16U);
   ws[2U] = u1;
-  uint64_t u2 = load64_be(b10 + (uint32_t)24U);
+  uint64_t u2 = load64_be(b10 + 24U);
   ws[3U] = u2;
-  uint64_t u3 = load64_be(b10 + (uint32_t)32U);
+  uint64_t u3 = load64_be(b10 + 32U);
   ws[4U] = u3;
-  uint64_t u4 = load64_be(b10 + (uint32_t)40U);
+  uint64_t u4 = load64_be(b10 + 40U);
   ws[5U] = u4;
-  uint64_t u5 = load64_be(b10 + (uint32_t)48U);
+  uint64_t u5 = load64_be(b10 + 48U);
   ws[6U] = u5;
-  uint64_t u6 = load64_be(b10 + (uint32_t)56U);
+  uint64_t u6 = load64_be(b10 + 56U);
   ws[7U] = u6;
-  uint64_t u7 = load64_be(b10 + (uint32_t)64U);
+  uint64_t u7 = load64_be(b10 + 64U);
   ws[8U] = u7;
-  uint64_t u8 = load64_be(b10 + (uint32_t)72U);
+  uint64_t u8 = load64_be(b10 + 72U);
   ws[9U] = u8;
-  uint64_t u9 = load64_be(b10 + (uint32_t)80U);
+  uint64_t u9 = load64_be(b10 + 80U);
   ws[10U] = u9;
-  uint64_t u10 = load64_be(b10 + (uint32_t)88U);
+  uint64_t u10 = load64_be(b10 + 88U);
   ws[11U] = u10;
-  uint64_t u11 = load64_be(b10 + (uint32_t)96U);
+  uint64_t u11 = load64_be(b10 + 96U);
   ws[12U] = u11;
-  uint64_t u12 = load64_be(b10 + (uint32_t)104U);
+  uint64_t u12 = load64_be(b10 + 104U);
   ws[13U] = u12;
-  uint64_t u13 = load64_be(b10 + (uint32_t)112U);
+  uint64_t u13 = load64_be(b10 + 112U);
   ws[14U] = u13;
-  uint64_t u14 = load64_be(b10 + (uint32_t)120U);
+  uint64_t u14 = load64_be(b10 + 120U);
   ws[15U] = u14;
   KRML_MAYBE_FOR5(i0,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
+    0U,
+    5U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[16U * i0 + i];
       uint64_t ws_t = ws[i];
       uint64_t a0 = hash[0U];
       uint64_t b0 = hash[1U];
@@ -326,20 +305,13 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
       uint64_t
       t1 =
         h02
-        +
-          ((e0 << (uint32_t)50U | e0 >> (uint32_t)14U)
-          ^
-            ((e0 << (uint32_t)46U | e0 >> (uint32_t)18U)
-            ^ (e0 << (uint32_t)23U | e0 >> (uint32_t)41U)))
+        + ((e0 << 50U | e0 >> 14U) ^ ((e0 << 46U | e0 >> 18U) ^ (e0 << 23U | e0 >> 41U)))
         + ((e0 & f0) ^ (~e0 & g0))
         + k_e_t
         + ws_t;
       uint64_t
       t2 =
-        ((a0 << (uint32_t)36U | a0 >> (uint32_t)28U)
-        ^
-          ((a0 << (uint32_t)30U | a0 >> (uint32_t)34U)
-          ^ (a0 << (uint32_t)25U | a0 >> (uint32_t)39U)))
+        ((a0 << 36U | a0 >> 28U) ^ ((a0 << 30U | a0 >> 34U) ^ (a0 << 25U | a0 >> 39U)))
         + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
       uint64_t a1 = t1 + t2;
       uint64_t b1 = a0;
@@ -357,30 +329,24 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)4U)
+    if (i0 < 4U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         uint64_t t16 = ws[i];
-        uint64_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        uint64_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        uint64_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
-        uint64_t
-        s1 =
-          (t2 << (uint32_t)45U | t2 >> (uint32_t)19U)
-          ^ ((t2 << (uint32_t)3U | t2 >> (uint32_t)61U) ^ t2 >> (uint32_t)6U);
-        uint64_t
-        s0 =
-          (t15 << (uint32_t)63U | t15 >> (uint32_t)1U)
-          ^ ((t15 << (uint32_t)56U | t15 >> (uint32_t)8U) ^ t15 >> (uint32_t)7U);
+        uint64_t t15 = ws[(i + 1U) % 16U];
+        uint64_t t7 = ws[(i + 9U) % 16U];
+        uint64_t t2 = ws[(i + 14U) % 16U];
+        uint64_t s1 = (t2 << 45U | t2 >> 19U) ^ ((t2 << 3U | t2 >> 61U) ^ t2 >> 6U);
+        uint64_t s0 = (t15 << 63U | t15 >> 1U) ^ ((t15 << 56U | t15 >> 8U) ^ t15 >> 7U);
         ws[i] = s1 + t7 + s0 + t16;);
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = hash;
     uint64_t x = hash[i] + hash_old[i];
     os[i] = x;);
@@ -388,11 +354,11 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
 
 void Hacl_SHA2_Scalar32_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
 {
-  uint32_t blocks = len / (uint32_t)128U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 128U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b0 = b;
-    uint8_t *mb = b0 + i * (uint32_t)128U;
+    uint8_t *mb = b0 + i * 128U;
     sha512_update(mb, st);
   }
 }
@@ -406,25 +372,25 @@ Hacl_SHA2_Scalar32_sha512_update_last(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+  if (len + 16U + 1U <= 128U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)128U;
+  uint32_t fin = blocks * 128U;
   uint8_t last[256U] = { 0U };
   uint8_t totlen_buf[16U] = { 0U };
-  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
   store128_be(totlen_buf, total_len_bits);
   uint8_t *b0 = b;
   memcpy(last, b0, len * sizeof (uint8_t));
-  last[len] = (uint8_t)0x80U;
-  memcpy(last + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last[len] = 0x80U;
+  memcpy(last + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
+  uint8_t *last10 = last + 128U;
   uint8_t *l0 = last00;
   uint8_t *l1 = last10;
   uint8_t *lb0 = l0;
@@ -432,7 +398,7 @@ Hacl_SHA2_Scalar32_sha512_update_last(
   uint8_t *last0 = lb0;
   uint8_t *last1 = lb1;
   sha512_update(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha512_update(last1, hash);
     return;
@@ -442,20 +408,16 @@ Hacl_SHA2_Scalar32_sha512_update_last(
 void Hacl_SHA2_Scalar32_sha512_finish(uint64_t *st, uint8_t *h)
 {
   uint8_t hbuf[64U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store64_be(hbuf + i * (uint32_t)8U, st[i]););
-  memcpy(h, hbuf, (uint32_t)64U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store64_be(hbuf + i * 8U, st[i]););
+  memcpy(h, hbuf, 64U * sizeof (uint8_t));
 }
 
 void Hacl_SHA2_Scalar32_sha384_init(uint64_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = hash;
     uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
     os[i] = x;);
@@ -480,12 +442,8 @@ Hacl_SHA2_Scalar32_sha384_update_last(
 void Hacl_SHA2_Scalar32_sha384_finish(uint64_t *st, uint8_t *h)
 {
   uint8_t hbuf[64U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store64_be(hbuf + i * (uint32_t)8U, st[i]););
-  memcpy(h, hbuf, (uint32_t)48U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store64_be(hbuf + i * 8U, st[i]););
+  memcpy(h, hbuf, 48U * sizeof (uint8_t));
 }
 
 /**
@@ -494,10 +452,10 @@ calling `free_256`.
 */
 Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_256(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
@@ -517,10 +475,10 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state
   uint32_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
-  memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
+  memcpy(block_state, block_state0, 8U * sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_32
@@ -539,7 +497,7 @@ void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_MD_state_32 *s)
   uint32_t *block_state = scrut.block_state;
   Hacl_SHA2_Scalar32_sha256_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -548,33 +506,33 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
 {
   Hacl_Streaming_MD_state_32 s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+  if ((uint64_t)len > 2305843009213693951ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (len <= 64U - sz)
   {
     Hacl_Streaming_MD_state_32 s1 = *p;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -589,42 +547,40 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_MD_state_32 s1 = *p;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+      Hacl_SHA2_Scalar32_sha256_update_nblocks(64U, buf, block_state1);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)64U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
-    Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / (uint32_t)64U * (uint32_t)64U,
-      data1,
-      block_state1);
+    Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / 64U * 64U, data1, block_state1);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
     *p
@@ -639,7 +595,7 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
+    uint32_t diff = 64U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_MD_state_32 s1 = *p;
@@ -647,13 +603,13 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -672,41 +628,33 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+      Hacl_SHA2_Scalar32_sha256_update_nblocks(64U, buf, block_state1);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
-    Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / (uint32_t)64U * (uint32_t)64U,
-      data11,
-      block_state1);
+    Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / 64U * 64U, data11, block_state1);
     uint8_t *dst = buf;
     memcpy(dst, data21, data2_len * sizeof (uint8_t));
     *p
@@ -752,29 +700,29 @@ void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  Hacl_SHA2_Scalar32_sha256_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Hacl_SHA2_Scalar32_sha256_update_last(prev_len_last + (uint64_t)r,
     r,
@@ -807,10 +755,10 @@ void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *d
   uint8_t *rb = dst;
   uint32_t st[8U] = { 0U };
   Hacl_SHA2_Scalar32_sha256_init(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   Hacl_SHA2_Scalar32_sha256_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
   Hacl_SHA2_Scalar32_sha256_update_last(len_, rem, lb, st);
@@ -819,10 +767,10 @@ void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *d
 
 Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_224(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
@@ -837,7 +785,7 @@ void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_MD_state_32 *s)
   uint32_t *block_state = scrut.block_state;
   Hacl_SHA2_Scalar32_sha224_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -863,29 +811,29 @@ void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  sha224_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  sha224_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Hacl_SHA2_Scalar32_sha224_update_last(prev_len_last + (uint64_t)r,
     r,
@@ -908,10 +856,10 @@ void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *d
   uint8_t *rb = dst;
   uint32_t st[8U] = { 0U };
   Hacl_SHA2_Scalar32_sha224_init(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha224_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
   Hacl_SHA2_Scalar32_sha224_update_last(len_, rem, lb, st);
@@ -920,10 +868,10 @@ void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *d
 
 Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_512(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64
   *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
   p[0U] = s;
@@ -943,10 +891,10 @@ Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state
   uint64_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)128U * sizeof (uint8_t));
-  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
-  memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  memcpy(buf, buf0, 128U * sizeof (uint8_t));
+  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
+  memcpy(block_state, block_state0, 8U * sizeof (uint64_t));
   Hacl_Streaming_MD_state_64
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_64
@@ -962,7 +910,7 @@ void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_MD_state_64 *s)
   uint64_t *block_state = scrut.block_state;
   Hacl_SHA2_Scalar32_sha512_init(block_state);
   Hacl_Streaming_MD_state_64
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -971,33 +919,33 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
 {
   Hacl_Streaming_MD_state_64 s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)18446744073709551615U - total_len)
+  if ((uint64_t)len > 18446744073709551615ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)128U;
+    sz = 128U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    sz = (uint32_t)(total_len % (uint64_t)128U);
   }
-  if (len <= (uint32_t)128U - sz)
+  if (len <= 128U - sz)
   {
     Hacl_Streaming_MD_state_64 s1 = *p;
     uint64_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -1012,42 +960,40 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_MD_state_64 s1 = *p;
     uint64_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, buf, block_state1);
+      Hacl_SHA2_Scalar32_sha512_update_nblocks(128U, buf, block_state1);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)128U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
+    uint32_t n_blocks = (len - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / (uint32_t)128U * (uint32_t)128U,
-      data1,
-      block_state1);
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / 128U * 128U, data1, block_state1);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
     *p
@@ -1062,7 +1008,7 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
   }
   else
   {
-    uint32_t diff = (uint32_t)128U - sz;
+    uint32_t diff = 128U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_MD_state_64 s1 = *p;
@@ -1070,13 +1016,13 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)128U;
+      sz10 = 128U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -1095,41 +1041,33 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, buf, block_state1);
+      Hacl_SHA2_Scalar32_sha512_update_nblocks(128U, buf, block_state1);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)128U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
+    uint32_t n_blocks = (len - diff - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / (uint32_t)128U * (uint32_t)128U,
-      data11,
-      block_state1);
+    Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / 128U * 128U, data11, block_state1);
     uint8_t *dst = buf;
     memcpy(dst, data21, data2_len * sizeof (uint8_t));
     *p
@@ -1175,29 +1113,29 @@ void Hacl_Streaming_SHA2_finish_512(Hacl_Streaming_MD_state_64 *p, uint8_t *dst)
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)128U;
+    r = 128U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    r = (uint32_t)(total_len % (uint64_t)128U);
   }
   uint8_t *buf_1 = buf_;
   uint64_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 128U == 0U && r > 0U)
   {
-    ite = (uint32_t)128U;
+    ite = 128U;
   }
   else
   {
-    ite = r % (uint32_t)128U;
+    ite = r % 128U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  Hacl_SHA2_Scalar32_sha512_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
       FStar_UInt128_uint64_to_uint128((uint64_t)r)),
@@ -1231,10 +1169,10 @@ void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *d
   uint8_t *rb = dst;
   uint64_t st[8U] = { 0U };
   Hacl_SHA2_Scalar32_sha512_init(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
   Hacl_SHA2_Scalar32_sha512_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
   Hacl_SHA2_Scalar32_sha512_update_last(len_, rem, lb, st);
@@ -1243,10 +1181,10 @@ void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *d
 
 Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_384(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64
   *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
   p[0U] = s;
@@ -1261,7 +1199,7 @@ void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_MD_state_64 *s)
   uint64_t *block_state = scrut.block_state;
   Hacl_SHA2_Scalar32_sha384_init(block_state);
   Hacl_Streaming_MD_state_64
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -1287,29 +1225,29 @@ void Hacl_Streaming_SHA2_finish_384(Hacl_Streaming_MD_state_64 *p, uint8_t *dst)
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)128U;
+    r = 128U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    r = (uint32_t)(total_len % (uint64_t)128U);
   }
   uint8_t *buf_1 = buf_;
   uint64_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 128U == 0U && r > 0U)
   {
-    ite = (uint32_t)128U;
+    ite = 128U;
   }
   else
   {
-    ite = r % (uint32_t)128U;
+    ite = r % 128U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  Hacl_SHA2_Scalar32_sha384_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
       FStar_UInt128_uint64_to_uint128((uint64_t)r)),
@@ -1333,10 +1271,10 @@ void Hacl_Streaming_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *d
   uint8_t *rb = dst;
   uint64_t st[8U] = { 0U };
   Hacl_SHA2_Scalar32_sha384_init(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
   Hacl_SHA2_Scalar32_sha384_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
   Hacl_SHA2_Scalar32_sha384_update_last(len_, rem, lb, st);
diff --git a/src/msvc/Hacl_Hash_SHA3.c b/src/msvc/Hacl_Hash_SHA3.c
index 19d13b1b..51608a91 100644
--- a/src/msvc/Hacl_Hash_SHA3.c
+++ b/src/msvc/Hacl_Hash_SHA3.c
@@ -31,27 +31,27 @@ static uint32_t block_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)144U;
+        return 144U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)104U;
+        return 104U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)72U;
+        return 72U;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        return (uint32_t)168U;
+        return 168U;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     default:
       {
@@ -67,19 +67,19 @@ static uint32_t hash_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     default:
       {
@@ -97,7 +97,7 @@ Hacl_Hash_SHA3_update_multi_sha3(
   uint32_t n_blocks
 )
 {
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
     uint8_t *block = blocks + i * block_len(a);
     Hacl_Impl_SHA3_absorb_inner(block_len(a), block, s);
@@ -115,11 +115,11 @@ Hacl_Hash_SHA3_update_last_sha3(
   uint8_t suffix;
   if (a == Spec_Hash_Definitions_Shake128 || a == Spec_Hash_Definitions_Shake256)
   {
-    suffix = (uint8_t)0x1fU;
+    suffix = 0x1fU;
   }
   else
   {
-    suffix = (uint8_t)0x06U;
+    suffix = 0x06U;
   }
   uint32_t len = block_len(a);
   if (input_len == len)
@@ -127,16 +127,16 @@ Hacl_Hash_SHA3_update_last_sha3(
     Hacl_Impl_SHA3_absorb_inner(len, input, s);
     uint8_t lastBlock_[200U] = { 0U };
     uint8_t *lastBlock = lastBlock_;
-    memcpy(lastBlock, input + input_len, (uint32_t)0U * sizeof (uint8_t));
+    memcpy(lastBlock, input + input_len, 0U * sizeof (uint8_t));
     lastBlock[0U] = suffix;
     Hacl_Impl_SHA3_loadState(len, lastBlock, s);
-    if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && (uint32_t)0U == len - (uint32_t)1U)
+    if (!(((uint32_t)suffix & 0x80U) == 0U) && 0U == len - 1U)
     {
       Hacl_Impl_SHA3_state_permute(s);
     }
     uint8_t nextBlock_[200U] = { 0U };
     uint8_t *nextBlock = nextBlock_;
-    nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U;
+    nextBlock[len - 1U] = 0x80U;
     Hacl_Impl_SHA3_loadState(len, nextBlock, s);
     Hacl_Impl_SHA3_state_permute(s);
     return;
@@ -146,13 +146,13 @@ Hacl_Hash_SHA3_update_last_sha3(
   memcpy(lastBlock, input, input_len * sizeof (uint8_t));
   lastBlock[input_len] = suffix;
   Hacl_Impl_SHA3_loadState(len, lastBlock, s);
-  if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && input_len == len - (uint32_t)1U)
+  if (!(((uint32_t)suffix & 0x80U) == 0U) && input_len == len - 1U)
   {
     Hacl_Impl_SHA3_state_permute(s);
   }
   uint8_t nextBlock_[200U] = { 0U };
   uint8_t *nextBlock = nextBlock_;
-  nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U;
+  nextBlock[len - 1U] = 0x80U;
   Hacl_Impl_SHA3_loadState(len, nextBlock, s);
   Hacl_Impl_SHA3_state_permute(s);
 }
@@ -174,15 +174,15 @@ Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_
 {
   KRML_CHECK_SIZE(sizeof (uint8_t), block_len(a));
   uint8_t *buf0 = (uint8_t *)KRML_HOST_CALLOC(block_len(a), sizeof (uint8_t));
-  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
   Hacl_Streaming_Keccak_hash_buf block_state = { .fst = a, .snd = buf };
   Hacl_Streaming_Keccak_state
-  s = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)0U };
   Hacl_Streaming_Keccak_state
   *p = (Hacl_Streaming_Keccak_state *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_Keccak_state));
   p[0U] = s;
   uint64_t *s1 = block_state.snd;
-  memset(s1, 0U, (uint32_t)25U * sizeof (uint64_t));
+  memset(s1, 0U, 25U * sizeof (uint64_t));
   return p;
 }
 
@@ -207,12 +207,12 @@ Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_copy(Hacl_Streaming_Keccak_st
   KRML_CHECK_SIZE(sizeof (uint8_t), block_len(i));
   uint8_t *buf1 = (uint8_t *)KRML_HOST_CALLOC(block_len(i), sizeof (uint8_t));
   memcpy(buf1, buf0, block_len(i) * sizeof (uint8_t));
-  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
   Hacl_Streaming_Keccak_hash_buf block_state = { .fst = i, .snd = buf };
   hash_buf2 scrut = { .fst = block_state0, .snd = block_state };
   uint64_t *s_dst = scrut.snd.snd;
   uint64_t *s_src = scrut.fst.snd;
-  memcpy(s_dst, s_src, (uint32_t)25U * sizeof (uint64_t));
+  memcpy(s_dst, s_src, 25U * sizeof (uint64_t));
   Hacl_Streaming_Keccak_state
   s = { .block_state = block_state, .buf = buf1, .total_len = total_len0 };
   Hacl_Streaming_Keccak_state
@@ -227,11 +227,11 @@ void Hacl_Streaming_Keccak_reset(Hacl_Streaming_Keccak_state *s)
   uint8_t *buf = scrut.buf;
   Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state;
   Spec_Hash_Definitions_hash_alg i = block_state.fst;
-  KRML_HOST_IGNORE(i);
+  KRML_MAYBE_UNUSED_VAR(i);
   uint64_t *s1 = block_state.snd;
-  memset(s1, 0U, (uint32_t)25U * sizeof (uint64_t));
+  memset(s1, 0U, 25U * sizeof (uint64_t));
   Hacl_Streaming_Keccak_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -242,12 +242,12 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
   Hacl_Streaming_Keccak_hash_buf block_state = s.block_state;
   uint64_t total_len = s.total_len;
   Spec_Hash_Definitions_hash_alg i = block_state.fst;
-  if ((uint64_t)len > (uint64_t)0xFFFFFFFFFFFFFFFFU - total_len)
+  if ((uint64_t)len > 0xFFFFFFFFFFFFFFFFULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)block_len(i) == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)block_len(i) == 0ULL && total_len > 0ULL)
   {
     sz = block_len(i);
   }
@@ -262,7 +262,7 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
     {
       sz1 = block_len(i);
     }
@@ -283,14 +283,14 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Keccak_state s1 = *p;
     Hacl_Streaming_Keccak_hash_buf block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
     {
       sz1 = block_len(i);
     }
@@ -298,14 +298,14 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     {
       sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
       uint64_t *s2 = block_state1.snd;
       Hacl_Hash_SHA3_update_multi_sha3(a1, s2, buf, block_len(i) / block_len(a1));
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)block_len(i) == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)block_len(i) == 0ULL && (uint64_t)len > 0ULL)
     {
       ite = block_len(i);
     }
@@ -343,7 +343,7 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)block_len(i) == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)block_len(i) == 0ULL && total_len10 > 0ULL)
     {
       sz10 = block_len(i);
     }
@@ -368,7 +368,7 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
     {
       sz1 = block_len(i);
     }
@@ -376,20 +376,14 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     {
       sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
       uint64_t *s2 = block_state1.snd;
       Hacl_Hash_SHA3_update_multi_sha3(a1, s2, buf, block_len(i) / block_len(a1));
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)block_len(i)
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)block_len(i) == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
       ite = block_len(i);
     }
@@ -433,7 +427,7 @@ finish_(
   uint8_t *buf_ = scrut0.buf;
   uint64_t total_len = scrut0.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)block_len(a) == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)block_len(a) == 0ULL && total_len > 0ULL)
   {
     r = block_len(a);
   }
@@ -447,9 +441,9 @@ finish_(
   hash_buf2 scrut = { .fst = block_state, .snd = tmp_block_state };
   uint64_t *s_dst = scrut.snd.snd;
   uint64_t *s_src = scrut.fst.snd;
-  memcpy(s_dst, s_src, (uint32_t)25U * sizeof (uint64_t));
+  memcpy(s_dst, s_src, 25U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % block_len(a) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(a) == 0U && r > 0U)
   {
     ite = block_len(a);
   }
@@ -461,7 +455,7 @@ finish_(
   uint8_t *buf_multi = buf_1;
   Spec_Hash_Definitions_hash_alg a1 = tmp_block_state.fst;
   uint64_t *s0 = tmp_block_state.snd;
-  Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, (uint32_t)0U / block_len(a1));
+  Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, 0U / block_len(a1));
   Spec_Hash_Definitions_hash_alg a10 = tmp_block_state.fst;
   uint64_t *s1 = tmp_block_state.snd;
   Hacl_Hash_SHA3_update_last_sha3(a10, s1, buf_last, r);
@@ -495,7 +489,7 @@ Hacl_Streaming_Keccak_squeeze(Hacl_Streaming_Keccak_state *s, uint8_t *dst, uint
   {
     return Hacl_Streaming_Types_InvalidAlgorithm;
   }
-  if (l == (uint32_t)0U)
+  if (l == 0U)
   {
     return Hacl_Streaming_Types_InvalidLength;
   }
@@ -529,13 +523,7 @@ Hacl_SHA3_shake128_hacl(
   uint8_t *output
 )
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1344U,
-    (uint32_t)256U,
-    inputByteLen,
-    input,
-    (uint8_t)0x1FU,
-    outputByteLen,
-    output);
+  Hacl_Impl_SHA3_keccak(1344U, 256U, inputByteLen, input, 0x1FU, outputByteLen, output);
 }
 
 void
@@ -546,169 +534,99 @@ Hacl_SHA3_shake256_hacl(
   uint8_t *output
 )
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1088U,
-    (uint32_t)512U,
-    inputByteLen,
-    input,
-    (uint8_t)0x1FU,
-    outputByteLen,
-    output);
+  Hacl_Impl_SHA3_keccak(1088U, 512U, inputByteLen, input, 0x1FU, outputByteLen, output);
 }
 
 void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1152U,
-    (uint32_t)448U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)28U,
-    output);
+  Hacl_Impl_SHA3_keccak(1152U, 448U, inputByteLen, input, 0x06U, 28U, output);
 }
 
 void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1088U,
-    (uint32_t)512U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)32U,
-    output);
+  Hacl_Impl_SHA3_keccak(1088U, 512U, inputByteLen, input, 0x06U, 32U, output);
 }
 
 void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)832U,
-    (uint32_t)768U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)48U,
-    output);
+  Hacl_Impl_SHA3_keccak(832U, 768U, inputByteLen, input, 0x06U, 48U, output);
 }
 
 void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)576U,
-    (uint32_t)1024U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)64U,
-    output);
+  Hacl_Impl_SHA3_keccak(576U, 1024U, inputByteLen, input, 0x06U, 64U, output);
 }
 
 static const
 uint32_t
 keccak_rotc[24U] =
   {
-    (uint32_t)1U, (uint32_t)3U, (uint32_t)6U, (uint32_t)10U, (uint32_t)15U, (uint32_t)21U,
-    (uint32_t)28U, (uint32_t)36U, (uint32_t)45U, (uint32_t)55U, (uint32_t)2U, (uint32_t)14U,
-    (uint32_t)27U, (uint32_t)41U, (uint32_t)56U, (uint32_t)8U, (uint32_t)25U, (uint32_t)43U,
-    (uint32_t)62U, (uint32_t)18U, (uint32_t)39U, (uint32_t)61U, (uint32_t)20U, (uint32_t)44U
+    1U, 3U, 6U, 10U, 15U, 21U, 28U, 36U, 45U, 55U, 2U, 14U, 27U, 41U, 56U, 8U, 25U, 43U, 62U, 18U,
+    39U, 61U, 20U, 44U
   };
 
 static const
 uint32_t
 keccak_piln[24U] =
   {
-    (uint32_t)10U, (uint32_t)7U, (uint32_t)11U, (uint32_t)17U, (uint32_t)18U, (uint32_t)3U,
-    (uint32_t)5U, (uint32_t)16U, (uint32_t)8U, (uint32_t)21U, (uint32_t)24U, (uint32_t)4U,
-    (uint32_t)15U, (uint32_t)23U, (uint32_t)19U, (uint32_t)13U, (uint32_t)12U, (uint32_t)2U,
-    (uint32_t)20U, (uint32_t)14U, (uint32_t)22U, (uint32_t)9U, (uint32_t)6U, (uint32_t)1U
+    10U, 7U, 11U, 17U, 18U, 3U, 5U, 16U, 8U, 21U, 24U, 4U, 15U, 23U, 19U, 13U, 12U, 2U, 20U, 14U,
+    22U, 9U, 6U, 1U
   };
 
 static const
 uint64_t
 keccak_rndc[24U] =
   {
-    (uint64_t)0x0000000000000001U, (uint64_t)0x0000000000008082U, (uint64_t)0x800000000000808aU,
-    (uint64_t)0x8000000080008000U, (uint64_t)0x000000000000808bU, (uint64_t)0x0000000080000001U,
-    (uint64_t)0x8000000080008081U, (uint64_t)0x8000000000008009U, (uint64_t)0x000000000000008aU,
-    (uint64_t)0x0000000000000088U, (uint64_t)0x0000000080008009U, (uint64_t)0x000000008000000aU,
-    (uint64_t)0x000000008000808bU, (uint64_t)0x800000000000008bU, (uint64_t)0x8000000000008089U,
-    (uint64_t)0x8000000000008003U, (uint64_t)0x8000000000008002U, (uint64_t)0x8000000000000080U,
-    (uint64_t)0x000000000000800aU, (uint64_t)0x800000008000000aU, (uint64_t)0x8000000080008081U,
-    (uint64_t)0x8000000000008080U, (uint64_t)0x0000000080000001U, (uint64_t)0x8000000080008008U
+    0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL,
+    0x000000000000808bULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL,
+    0x000000000000008aULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
+    0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL,
+    0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800aULL, 0x800000008000000aULL,
+    0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
   };
 
 void Hacl_Impl_SHA3_state_permute(uint64_t *s)
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)24U; i0++)
+  for (uint32_t i0 = 0U; i0 < 24U; i0++)
   {
     uint64_t _C[5U] = { 0U };
     KRML_MAYBE_FOR5(i,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      _C[i] =
-        s[i
-        + (uint32_t)0U]
-        ^
-          (s[i
-          + (uint32_t)5U]
-          ^ (s[i + (uint32_t)10U] ^ (s[i + (uint32_t)15U] ^ s[i + (uint32_t)20U]))););
+      0U,
+      5U,
+      1U,
+      _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U]))););
     KRML_MAYBE_FOR5(i1,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      uint64_t uu____0 = _C[(i1 + (uint32_t)1U) % (uint32_t)5U];
-      uint64_t
-      _D =
-        _C[(i1 + (uint32_t)4U)
-        % (uint32_t)5U]
-        ^ (uu____0 << (uint32_t)1U | uu____0 >> (uint32_t)63U);
-      KRML_MAYBE_FOR5(i,
-        (uint32_t)0U,
-        (uint32_t)5U,
-        (uint32_t)1U,
-        s[i1 + (uint32_t)5U * i] = s[i1 + (uint32_t)5U * i] ^ _D;););
+      0U,
+      5U,
+      1U,
+      uint64_t uu____0 = _C[(i1 + 1U) % 5U];
+      uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U);
+      KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;););
     uint64_t x = s[1U];
     uint64_t current = x;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)24U; i++)
+    for (uint32_t i = 0U; i < 24U; i++)
     {
       uint32_t _Y = keccak_piln[i];
       uint32_t r = keccak_rotc[i];
       uint64_t temp = s[_Y];
       uint64_t uu____1 = current;
-      s[_Y] = uu____1 << r | uu____1 >> ((uint32_t)64U - r);
+      s[_Y] = uu____1 << r | uu____1 >> (64U - r);
       current = temp;
     }
     KRML_MAYBE_FOR5(i,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      uint64_t
-      v0 =
-        s[(uint32_t)0U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)1U + (uint32_t)5U * i] & s[(uint32_t)2U + (uint32_t)5U * i]);
-      uint64_t
-      v1 =
-        s[(uint32_t)1U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)2U + (uint32_t)5U * i] & s[(uint32_t)3U + (uint32_t)5U * i]);
-      uint64_t
-      v2 =
-        s[(uint32_t)2U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)3U + (uint32_t)5U * i] & s[(uint32_t)4U + (uint32_t)5U * i]);
-      uint64_t
-      v3 =
-        s[(uint32_t)3U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)4U + (uint32_t)5U * i] & s[(uint32_t)0U + (uint32_t)5U * i]);
-      uint64_t
-      v4 =
-        s[(uint32_t)4U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)0U + (uint32_t)5U * i] & s[(uint32_t)1U + (uint32_t)5U * i]);
-      s[(uint32_t)0U + (uint32_t)5U * i] = v0;
-      s[(uint32_t)1U + (uint32_t)5U * i] = v1;
-      s[(uint32_t)2U + (uint32_t)5U * i] = v2;
-      s[(uint32_t)3U + (uint32_t)5U * i] = v3;
-      s[(uint32_t)4U + (uint32_t)5U * i] = v4;);
+      0U,
+      5U,
+      1U,
+      uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]);
+      uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]);
+      uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]);
+      uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]);
+      uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]);
+      s[0U + 5U * i] = v0;
+      s[1U + 5U * i] = v1;
+      s[2U + 5U * i] = v2;
+      s[3U + 5U * i] = v3;
+      s[4U + 5U * i] = v4;);
     uint64_t c = keccak_rndc[i0];
     s[0U] = s[0U] ^ c;
   }
@@ -718,9 +636,9 @@ void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
 {
   uint8_t block[200U] = { 0U };
   memcpy(block, input, rateInBytes * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+  for (uint32_t i = 0U; i < 25U; i++)
   {
-    uint64_t u = load64_le(block + i * (uint32_t)8U);
+    uint64_t u = load64_le(block + i * 8U);
     uint64_t x = u;
     s[i] = s[i] ^ x;
   }
@@ -729,10 +647,10 @@ void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
 static void storeState(uint32_t rateInBytes, uint64_t *s, uint8_t *res)
 {
   uint8_t block[200U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+  for (uint32_t i = 0U; i < 25U; i++)
   {
     uint64_t sj = s[i];
-    store64_le(block + i * (uint32_t)8U, sj);
+    store64_le(block + i * 8U, sj);
   }
   memcpy(res, block, rateInBytes * sizeof (uint8_t));
 }
@@ -754,7 +672,7 @@ absorb(
 {
   uint32_t n_blocks = inputByteLen / rateInBytes;
   uint32_t rem = inputByteLen % rateInBytes;
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
     uint8_t *block = input + i * rateInBytes;
     Hacl_Impl_SHA3_absorb_inner(rateInBytes, block, s);
@@ -765,13 +683,13 @@ absorb(
   memcpy(lastBlock, last, rem * sizeof (uint8_t));
   lastBlock[rem] = delimitedSuffix;
   Hacl_Impl_SHA3_loadState(rateInBytes, lastBlock, s);
-  if (!((delimitedSuffix & (uint8_t)0x80U) == (uint8_t)0U) && rem == rateInBytes - (uint32_t)1U)
+  if (!(((uint32_t)delimitedSuffix & 0x80U) == 0U) && rem == rateInBytes - 1U)
   {
     Hacl_Impl_SHA3_state_permute(s);
   }
   uint8_t nextBlock_[200U] = { 0U };
   uint8_t *nextBlock = nextBlock_;
-  nextBlock[rateInBytes - (uint32_t)1U] = (uint8_t)0x80U;
+  nextBlock[rateInBytes - 1U] = 0x80U;
   Hacl_Impl_SHA3_loadState(rateInBytes, nextBlock, s);
   Hacl_Impl_SHA3_state_permute(s);
 }
@@ -788,7 +706,7 @@ Hacl_Impl_SHA3_squeeze(
   uint32_t remOut = outputByteLen % rateInBytes;
   uint8_t *last = output + outputByteLen - remOut;
   uint8_t *blocks = output;
-  for (uint32_t i = (uint32_t)0U; i < outBlocks; i++)
+  for (uint32_t i = 0U; i < outBlocks; i++)
   {
     storeState(rateInBytes, s, blocks + i * rateInBytes);
     Hacl_Impl_SHA3_state_permute(s);
@@ -807,8 +725,8 @@ Hacl_Impl_SHA3_keccak(
   uint8_t *output
 )
 {
-  KRML_HOST_IGNORE(capacity);
-  uint32_t rateInBytes = rate / (uint32_t)8U;
+  KRML_MAYBE_UNUSED_VAR(capacity);
+  uint32_t rateInBytes = rate / 8U;
   uint64_t s[25U] = { 0U };
   absorb(s, rateInBytes, inputByteLen, input, delimitedSuffix);
   Hacl_Impl_SHA3_squeeze(s, rateInBytes, outputByteLen, output);
diff --git a/src/msvc/Hacl_K256_ECDSA.c b/src/msvc/Hacl_K256_ECDSA.c
index c5dda43f..5bc98c73 100644
--- a/src/msvc/Hacl_K256_ECDSA.c
+++ b/src/msvc/Hacl_K256_ECDSA.c
@@ -35,27 +35,27 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res)
 {
   uint64_t *a0 = a;
   uint64_t *res0 = res;
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < bLen / (uint32_t)4U; i++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i = 0U; i < bLen / 4U; i++)
   {
-    uint64_t t1 = a0[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res0 + (uint32_t)4U * i;
+    uint64_t t1 = a0[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res0 + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a0[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res0 + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a0[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res0 + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a0[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res0 + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a0[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res0 + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a0[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res0 + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a0[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res0 + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
-  for (uint32_t i = bLen / (uint32_t)4U * (uint32_t)4U; i < bLen; i++)
+  for (uint32_t i = bLen / 4U * 4U; i < bLen; i++)
   {
     uint64_t t1 = a0[i];
     uint64_t t2 = b[i];
@@ -68,26 +68,26 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res)
     uint64_t *a1 = a + bLen;
     uint64_t *res1 = res + bLen;
     uint64_t c = c00;
-    for (uint32_t i = (uint32_t)0U; i < (aLen - bLen) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (aLen - bLen) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, 0ULL, res_i);
     }
-    for (uint32_t i = (aLen - bLen) / (uint32_t)4U * (uint32_t)4U; i < aLen - bLen; i++)
+    for (uint32_t i = (aLen - bLen) / 4U * 4U; i < aLen - bLen; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c1 = c;
     return c1;
@@ -97,23 +97,23 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res)
 
 static uint64_t add4(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   return c;
@@ -121,52 +121,52 @@ static uint64_t add4(uint64_t *a, uint64_t *b, uint64_t *res)
 
 static void add_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -174,53 +174,53 @@ static void add_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 
 static void sub_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x;);
@@ -228,59 +228,59 @@ static void sub_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 
 static void mul4(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = b[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = a[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = a[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
     uint64_t r = c;
-    res[(uint32_t)4U + i0] = r;);
+    res[4U + i0] = r;);
 }
 
 static void sqr4(uint64_t *a, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = a;
     uint64_t a_j = a[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -288,30 +288,30 @@ static void sqr4(uint64_t *a, uint64_t *res)
     }
     uint64_t r = c;
     res[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline uint64_t is_qelem_zero(uint64_t *f)
 {
   uint64_t bn_zero[4U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(f[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
@@ -325,33 +325,33 @@ static inline bool is_qelem_zero_vartime(uint64_t *f)
   uint64_t f1 = f[1U];
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
-  return f0 == (uint64_t)0U && f1 == (uint64_t)0U && f2 == (uint64_t)0U && f3 == (uint64_t)0U;
+  return f0 == 0ULL && f1 == 0ULL && f2 == 0ULL && f3 == 0ULL;
 }
 
 static inline uint64_t load_qelem_check(uint64_t *f, uint8_t *b)
 {
   uint64_t n[4U] = { 0U };
-  n[0U] = (uint64_t)0xbfd25e8cd0364141U;
-  n[1U] = (uint64_t)0xbaaedce6af48a03bU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0xffffffffffffffffU;
+  n[0U] = 0xbfd25e8cd0364141ULL;
+  n[1U] = 0xbaaedce6af48a03bULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0xffffffffffffffffULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = f;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
   uint64_t is_zero = is_qelem_zero(f);
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(f[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(f[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t is_lt_q = acc;
   return ~is_zero & is_lt_q;
 }
@@ -359,11 +359,11 @@ static inline uint64_t load_qelem_check(uint64_t *f, uint8_t *b)
 static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b)
 {
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = f;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
   bool is_zero = is_qelem_zero_vartime(f);
@@ -372,29 +372,29 @@ static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b)
   uint64_t a2 = f[2U];
   uint64_t a3 = f[3U];
   bool is_lt_q_b;
-  if (a3 < (uint64_t)0xffffffffffffffffU)
+  if (a3 < 0xffffffffffffffffULL)
   {
     is_lt_q_b = true;
   }
-  else if (a2 < (uint64_t)0xfffffffffffffffeU)
+  else if (a2 < 0xfffffffffffffffeULL)
   {
     is_lt_q_b = true;
   }
-  else if (a2 > (uint64_t)0xfffffffffffffffeU)
+  else if (a2 > 0xfffffffffffffffeULL)
   {
     is_lt_q_b = false;
   }
-  else if (a1 < (uint64_t)0xbaaedce6af48a03bU)
+  else if (a1 < 0xbaaedce6af48a03bULL)
   {
     is_lt_q_b = true;
   }
-  else if (a1 > (uint64_t)0xbaaedce6af48a03bU)
+  else if (a1 > 0xbaaedce6af48a03bULL)
   {
     is_lt_q_b = false;
   }
   else
   {
-    is_lt_q_b = a0 < (uint64_t)0xbfd25e8cd0364141U;
+    is_lt_q_b = a0 < 0xbfd25e8cd0364141ULL;
   }
   return !is_zero && is_lt_q_b;
 }
@@ -402,16 +402,16 @@ static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b)
 static inline void modq_short(uint64_t *out, uint64_t *a)
 {
   uint64_t tmp[4U] = { 0U };
-  tmp[0U] = (uint64_t)0x402da1732fc9bebfU;
-  tmp[1U] = (uint64_t)0x4551231950b75fc4U;
-  tmp[2U] = (uint64_t)0x1U;
-  tmp[3U] = (uint64_t)0x0U;
+  tmp[0U] = 0x402da1732fc9bebfULL;
+  tmp[1U] = 0x4551231950b75fc4ULL;
+  tmp[2U] = 0x1ULL;
+  tmp[3U] = 0x0ULL;
   uint64_t c = add4(a, tmp, out);
-  uint64_t mask = (uint64_t)0U - c;
+  uint64_t mask = 0ULL - c;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = out;
     uint64_t x = (mask & out[i]) | (~mask & a[i]);
     os[i] = x;);
@@ -421,35 +421,31 @@ static inline void load_qelem_modq(uint64_t *f, uint8_t *b)
 {
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = f;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
-  memcpy(tmp, f, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, f, 4U * sizeof (uint64_t));
   modq_short(f, tmp);
 }
 
 static inline void store_qelem(uint8_t *b, uint64_t *f)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(b + i * (uint32_t)8U, f[(uint32_t)4U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(b + i * 8U, f[4U - i - 1U]););
 }
 
 static inline void qadd(uint64_t *out, uint64_t *f1, uint64_t *f2)
 {
   uint64_t n[4U] = { 0U };
-  n[0U] = (uint64_t)0xbfd25e8cd0364141U;
-  n[1U] = (uint64_t)0xbaaedce6af48a03bU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0xffffffffffffffffU;
+  n[0U] = 0xbfd25e8cd0364141ULL;
+  n[1U] = 0xbaaedce6af48a03bULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0xffffffffffffffffULL;
   add_mod4(n, f1, f2, out);
 }
 
@@ -463,33 +459,33 @@ mul_pow2_256_minus_q_add(
   uint64_t *res
 )
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + (uint32_t)2U);
-  uint64_t *tmp = (uint64_t *)alloca((len + (uint32_t)2U) * sizeof (uint64_t));
-  memset(tmp, 0U, (len + (uint32_t)2U) * sizeof (uint64_t));
-  memset(tmp, 0U, (len + (uint32_t)2U) * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), len + 2U);
+  uint64_t *tmp = (uint64_t *)alloca((len + 2U) * sizeof (uint64_t));
+  memset(tmp, 0U, (len + 2U) * sizeof (uint64_t));
+  memset(tmp, 0U, (len + 2U) * sizeof (uint64_t));
   KRML_MAYBE_FOR2(i0,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint64_t bj = t01[i0];
     uint64_t *res_j = tmp + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = a[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = a[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = a[i];
       uint64_t *res_i = res_j + i;
@@ -497,9 +493,9 @@ mul_pow2_256_minus_q_add(
     }
     uint64_t r = c;
     tmp[len + i0] = r;);
-  memcpy(res + (uint32_t)2U, a, len * sizeof (uint64_t));
-  KRML_HOST_IGNORE(bn_add(resLen, res, len + (uint32_t)2U, tmp, res));
-  uint64_t c = bn_add(resLen, res, (uint32_t)4U, e, res);
+  memcpy(res + 2U, a, len * sizeof (uint64_t));
+  bn_add(resLen, res, len + 2U, tmp, res);
+  uint64_t c = bn_add(resLen, res, 4U, e, res);
   return c;
 }
 
@@ -507,34 +503,23 @@ static inline void modq(uint64_t *out, uint64_t *a)
 {
   uint64_t r[4U] = { 0U };
   uint64_t tmp[4U] = { 0U };
-  tmp[0U] = (uint64_t)0x402da1732fc9bebfU;
-  tmp[1U] = (uint64_t)0x4551231950b75fc4U;
-  tmp[2U] = (uint64_t)0x1U;
-  tmp[3U] = (uint64_t)0x0U;
+  tmp[0U] = 0x402da1732fc9bebfULL;
+  tmp[1U] = 0x4551231950b75fc4ULL;
+  tmp[2U] = 0x1ULL;
+  tmp[3U] = 0x0ULL;
   uint64_t *t01 = tmp;
   uint64_t m[7U] = { 0U };
   uint64_t p[5U] = { 0U };
-  KRML_HOST_IGNORE(mul_pow2_256_minus_q_add((uint32_t)4U,
-      (uint32_t)7U,
-      t01,
-      a + (uint32_t)4U,
-      a,
-      m));
-  KRML_HOST_IGNORE(mul_pow2_256_minus_q_add((uint32_t)3U,
-      (uint32_t)5U,
-      t01,
-      m + (uint32_t)4U,
-      m,
-      p));
-  uint64_t
-  c2 = mul_pow2_256_minus_q_add((uint32_t)1U, (uint32_t)4U, t01, p + (uint32_t)4U, p, r);
+  mul_pow2_256_minus_q_add(4U, 7U, t01, a + 4U, a, m);
+  mul_pow2_256_minus_q_add(3U, 5U, t01, m + 4U, m, p);
+  uint64_t c2 = mul_pow2_256_minus_q_add(1U, 4U, t01, p + 4U, p, r);
   uint64_t c0 = c2;
   uint64_t c1 = add4(r, tmp, out);
-  uint64_t mask = (uint64_t)0U - (c0 + c1);
+  uint64_t mask = 0ULL - (c0 + c1);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = out;
     uint64_t x = (mask & out[i]) | (~mask & r[i]);
     os[i] = x;);
@@ -557,10 +542,10 @@ static inline void qsqr(uint64_t *out, uint64_t *f)
 static inline void qnegate_conditional_vartime(uint64_t *f, bool is_negate)
 {
   uint64_t n[4U] = { 0U };
-  n[0U] = (uint64_t)0xbfd25e8cd0364141U;
-  n[1U] = (uint64_t)0xbaaedce6af48a03bU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0xffffffffffffffffU;
+  n[0U] = 0xbfd25e8cd0364141ULL;
+  n[1U] = 0xbaaedce6af48a03bULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0xffffffffffffffffULL;
   uint64_t zero[4U] = { 0U };
   if (is_negate)
   {
@@ -574,31 +559,31 @@ static inline bool is_qelem_le_q_halved_vartime(uint64_t *f)
   uint64_t a1 = f[1U];
   uint64_t a2 = f[2U];
   uint64_t a3 = f[3U];
-  if (a3 < (uint64_t)0x7fffffffffffffffU)
+  if (a3 < 0x7fffffffffffffffULL)
   {
     return true;
   }
-  if (a3 > (uint64_t)0x7fffffffffffffffU)
+  if (a3 > 0x7fffffffffffffffULL)
   {
     return false;
   }
-  if (a2 < (uint64_t)0xffffffffffffffffU)
+  if (a2 < 0xffffffffffffffffULL)
   {
     return true;
   }
-  if (a2 > (uint64_t)0xffffffffffffffffU)
+  if (a2 > 0xffffffffffffffffULL)
   {
     return false;
   }
-  if (a1 < (uint64_t)0x5d576e7357a4501dU)
+  if (a1 < 0x5d576e7357a4501dULL)
   {
     return true;
   }
-  if (a1 > (uint64_t)0x5d576e7357a4501dU)
+  if (a1 > 0x5d576e7357a4501dULL)
   {
     return false;
   }
-  return a0 <= (uint64_t)0xdfe92f46681b20a0U;
+  return a0 <= 0xdfe92f46681b20a0ULL;
 }
 
 static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b)
@@ -606,27 +591,26 @@ static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b)
   uint64_t l[8U] = { 0U };
   mul4(a, b, l);
   uint64_t res_b_padded[4U] = { 0U };
-  memcpy(res_b_padded, l + (uint32_t)6U, (uint32_t)2U * sizeof (uint64_t));
-  uint64_t
-  c0 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, res_b_padded[0U], (uint64_t)1U, res);
-  uint64_t *a1 = res_b_padded + (uint32_t)1U;
-  uint64_t *res1 = res + (uint32_t)1U;
+  memcpy(res_b_padded, l + 6U, 2U * sizeof (uint64_t));
+  uint64_t c0 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, res_b_padded[0U], 1ULL, res);
+  uint64_t *a1 = res_b_padded + 1U;
+  uint64_t *res1 = res + 1U;
   uint64_t c = c0;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
+    0U,
+    3U,
+    1U,
     uint64_t t1 = a1[i];
     uint64_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i););
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t flag = l[5U] >> (uint32_t)63U;
-  uint64_t mask = (uint64_t)0U - flag;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t flag = l[5U] >> 63U;
+  uint64_t mask = 0ULL - flag;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (mask & res[i]) | (~mask & res_b_padded[i]);
     os[i] = x;);
@@ -634,7 +618,7 @@ static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b)
 
 static inline void qsquare_times_in_place(uint64_t *out, uint32_t b)
 {
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  for (uint32_t i = 0U; i < b; i++)
   {
     qsqr(out, out);
   }
@@ -642,8 +626,8 @@ static inline void qsquare_times_in_place(uint64_t *out, uint32_t b)
 
 static inline void qsquare_times(uint64_t *out, uint64_t *a, uint32_t b)
 {
-  memcpy(out, a, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  memcpy(out, a, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < b; i++)
   {
     qsqr(out, out);
   }
@@ -658,7 +642,7 @@ static inline void qinv(uint64_t *out, uint64_t *f)
   uint64_t x_1001[4U] = { 0U };
   uint64_t x_1011[4U] = { 0U };
   uint64_t x_1101[4U] = { 0U };
-  qsquare_times(x_10, f, (uint32_t)1U);
+  qsquare_times(x_10, f, 1U);
   qmul(x_11, x_10, f);
   qmul(x_101, x_10, x_11);
   qmul(x_111, x_10, x_101);
@@ -668,89 +652,89 @@ static inline void qinv(uint64_t *out, uint64_t *f)
   uint64_t x6[4U] = { 0U };
   uint64_t x8[4U] = { 0U };
   uint64_t x14[4U] = { 0U };
-  qsquare_times(x6, x_1101, (uint32_t)2U);
+  qsquare_times(x6, x_1101, 2U);
   qmul(x6, x6, x_1011);
-  qsquare_times(x8, x6, (uint32_t)2U);
+  qsquare_times(x8, x6, 2U);
   qmul(x8, x8, x_11);
-  qsquare_times(x14, x8, (uint32_t)6U);
+  qsquare_times(x14, x8, 6U);
   qmul(x14, x14, x6);
   uint64_t x56[4U] = { 0U };
-  qsquare_times(out, x14, (uint32_t)14U);
+  qsquare_times(out, x14, 14U);
   qmul(out, out, x14);
-  qsquare_times(x56, out, (uint32_t)28U);
+  qsquare_times(x56, out, 28U);
   qmul(x56, x56, out);
-  qsquare_times(out, x56, (uint32_t)56U);
+  qsquare_times(out, x56, 56U);
   qmul(out, out, x56);
-  qsquare_times_in_place(out, (uint32_t)14U);
+  qsquare_times_in_place(out, 14U);
   qmul(out, out, x14);
-  qsquare_times_in_place(out, (uint32_t)3U);
+  qsquare_times_in_place(out, 3U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_1011);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_1011);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)3U);
+  qsquare_times_in_place(out, 3U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_1001);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)10U);
+  qsquare_times_in_place(out, 10U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)9U);
+  qsquare_times_in_place(out, 9U);
   qmul(out, out, x8);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_1001);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_1011);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_11);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)10U);
+  qsquare_times_in_place(out, 10U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_1001);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, f);
-  qsquare_times_in_place(out, (uint32_t)8U);
+  qsquare_times_in_place(out, 8U);
   qmul(out, out, x6);
 }
 
 void Hacl_Impl_K256_Point_make_point_at_inf(uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
-  memset(px, 0U, (uint32_t)5U * sizeof (uint64_t));
-  memset(py, 0U, (uint32_t)5U * sizeof (uint64_t));
-  py[0U] = (uint64_t)1U;
-  memset(pz, 0U, (uint32_t)5U * sizeof (uint64_t));
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
+  memset(px, 0U, 5U * sizeof (uint64_t));
+  memset(py, 0U, 5U * sizeof (uint64_t));
+  py[0U] = 1ULL;
+  memset(pz, 0U, 5U * sizeof (uint64_t));
 }
 
 static inline void to_aff_point(uint64_t *p_aff, uint64_t *p)
 {
   uint64_t *x = p_aff;
-  uint64_t *y = p_aff + (uint32_t)5U;
+  uint64_t *y = p_aff + 5U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y1 = p + 5U;
+  uint64_t *z1 = p + 10U;
   uint64_t zinv[5U] = { 0U };
   Hacl_Impl_K256_Finv_finv(zinv, z1);
   Hacl_K256_Field_fmul(x, x1, zinv);
@@ -762,7 +746,7 @@ static inline void to_aff_point(uint64_t *p_aff, uint64_t *p)
 static inline void to_aff_point_x(uint64_t *x, uint64_t *p)
 {
   uint64_t *x1 = p;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *z1 = p + 10U;
   uint64_t zinv[5U] = { 0U };
   Hacl_Impl_K256_Finv_finv(zinv, z1);
   Hacl_K256_Field_fmul(x, x1, zinv);
@@ -773,13 +757,13 @@ static inline bool is_on_curve_vartime(uint64_t *p)
 {
   uint64_t y2_exp[5U] = { 0U };
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)5U;
+  uint64_t *y = p + 5U;
   uint64_t b[5U] = { 0U };
-  b[0U] = (uint64_t)0x7U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 0x7ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
   Hacl_K256_Field_fsqr(y2_exp, x);
   Hacl_K256_Field_fmul(y2_exp, y2_exp, x);
   Hacl_K256_Field_fadd(y2_exp, y2_exp, b);
@@ -795,11 +779,11 @@ static inline bool is_on_curve_vartime(uint64_t *p)
 void Hacl_Impl_K256_Point_point_negate(uint64_t *out, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
   uint64_t *ox = out;
-  uint64_t *oy = out + (uint32_t)5U;
-  uint64_t *oz = out + (uint32_t)10U;
+  uint64_t *oy = out + 5U;
+  uint64_t *oz = out + 10U;
   ox[0U] = px[0U];
   ox[1U] = px[1U];
   ox[2U] = px[2U];
@@ -815,11 +799,11 @@ void Hacl_Impl_K256_Point_point_negate(uint64_t *out, uint64_t *p)
   uint64_t a2 = py[2U];
   uint64_t a3 = py[3U];
   uint64_t a4 = py[4U];
-  uint64_t r0 = (uint64_t)18014381329608892U - a0;
-  uint64_t r1 = (uint64_t)18014398509481980U - a1;
-  uint64_t r2 = (uint64_t)18014398509481980U - a2;
-  uint64_t r3 = (uint64_t)18014398509481980U - a3;
-  uint64_t r4 = (uint64_t)1125899906842620U - a4;
+  uint64_t r0 = 18014381329608892ULL - a0;
+  uint64_t r1 = 18014398509481980ULL - a1;
+  uint64_t r2 = 18014398509481980ULL - a2;
+  uint64_t r3 = 18014398509481980ULL - a3;
+  uint64_t r4 = 1125899906842620ULL - a4;
   uint64_t f0 = r0;
   uint64_t f1 = r1;
   uint64_t f2 = r2;
@@ -845,9 +829,9 @@ static inline void point_negate_conditional_vartime(uint64_t *p, bool is_negate)
 static inline void aff_point_store(uint8_t *out, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
+  uint64_t *py = p + 5U;
   Hacl_K256_Field_store_felem(out, px);
-  Hacl_K256_Field_store_felem(out + (uint32_t)32U, py);
+  Hacl_K256_Field_store_felem(out + 32U, py);
 }
 
 void Hacl_Impl_K256_Point_point_store(uint8_t *out, uint64_t *p)
@@ -860,9 +844,9 @@ void Hacl_Impl_K256_Point_point_store(uint8_t *out, uint64_t *p)
 bool Hacl_Impl_K256_Point_aff_point_load_vartime(uint64_t *p, uint8_t *b)
 {
   uint8_t *px = b;
-  uint8_t *py = b + (uint32_t)32U;
+  uint8_t *py = b + 32U;
   uint64_t *bn_px = p;
-  uint64_t *bn_py = p + (uint32_t)5U;
+  uint64_t *bn_py = p + 5U;
   bool is_x_valid = Hacl_K256_Field_load_felem_lt_prime_vartime(bn_px, px);
   bool is_y_valid = Hacl_K256_Field_load_felem_lt_prime_vartime(bn_py, py);
   if (is_x_valid && is_y_valid)
@@ -879,14 +863,14 @@ static inline bool load_point_vartime(uint64_t *p, uint8_t *b)
   if (res)
   {
     uint64_t *x = p_aff;
-    uint64_t *y = p_aff + (uint32_t)5U;
+    uint64_t *y = p_aff + 5U;
     uint64_t *x1 = p;
-    uint64_t *y1 = p + (uint32_t)5U;
-    uint64_t *z1 = p + (uint32_t)10U;
-    memcpy(x1, x, (uint32_t)5U * sizeof (uint64_t));
-    memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t));
-    memset(z1, 0U, (uint32_t)5U * sizeof (uint64_t));
-    z1[0U] = (uint64_t)1U;
+    uint64_t *y1 = p + 5U;
+    uint64_t *z1 = p + 10U;
+    memcpy(x1, x, 5U * sizeof (uint64_t));
+    memcpy(y1, y, 5U * sizeof (uint64_t));
+    memset(z1, 0U, 5U * sizeof (uint64_t));
+    z1[0U] = 1ULL;
   }
   return res;
 }
@@ -895,24 +879,24 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
 {
   uint8_t s0 = s[0U];
   uint8_t s01 = s0;
-  if (!(s01 == (uint8_t)0x02U || s01 == (uint8_t)0x03U))
+  if (!(s01 == 0x02U || s01 == 0x03U))
   {
     return false;
   }
-  uint8_t *xb = s + (uint32_t)1U;
+  uint8_t *xb = s + 1U;
   bool is_x_valid = Hacl_K256_Field_load_felem_lt_prime_vartime(x, xb);
-  bool is_y_odd = s01 == (uint8_t)0x03U;
+  bool is_y_odd = s01 == 0x03U;
   if (!is_x_valid)
   {
     return false;
   }
   uint64_t y2[5U] = { 0U };
   uint64_t b[5U] = { 0U };
-  b[0U] = (uint64_t)0x7U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 0x7ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
   Hacl_K256_Field_fsqr(y2, x);
   Hacl_K256_Field_fmul(y2, y2, x);
   Hacl_K256_Field_fadd(y2, y2, b);
@@ -930,7 +914,7 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
     return false;
   }
   uint64_t x0 = y[0U];
-  bool is_y_odd1 = (x0 & (uint64_t)1U) == (uint64_t)1U;
+  bool is_y_odd1 = (x0 & 1ULL) == 1ULL;
   Hacl_K256_Field_fnegate_conditional_vartime(y, is_y_odd1 != is_y_odd);
   return true;
 }
@@ -939,33 +923,33 @@ void Hacl_Impl_K256_PointDouble_point_double(uint64_t *out, uint64_t *p)
 {
   uint64_t tmp[25U] = { 0U };
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y1 = p + 5U;
+  uint64_t *z1 = p + 10U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
   uint64_t *yy = tmp;
-  uint64_t *zz = tmp + (uint32_t)5U;
-  uint64_t *bzz3 = tmp + (uint32_t)10U;
-  uint64_t *bzz9 = tmp + (uint32_t)15U;
-  uint64_t *tmp1 = tmp + (uint32_t)20U;
+  uint64_t *zz = tmp + 5U;
+  uint64_t *bzz3 = tmp + 10U;
+  uint64_t *bzz9 = tmp + 15U;
+  uint64_t *tmp1 = tmp + 20U;
   Hacl_K256_Field_fsqr(yy, y1);
   Hacl_K256_Field_fsqr(zz, z1);
-  Hacl_K256_Field_fmul_small_num(x3, x1, (uint64_t)2U);
+  Hacl_K256_Field_fmul_small_num(x3, x1, 2ULL);
   Hacl_K256_Field_fmul(x3, x3, y1);
   Hacl_K256_Field_fmul(tmp1, yy, y1);
   Hacl_K256_Field_fmul(z3, tmp1, z1);
-  Hacl_K256_Field_fmul_small_num(z3, z3, (uint64_t)8U);
+  Hacl_K256_Field_fmul_small_num(z3, z3, 8ULL);
   Hacl_K256_Field_fnormalize_weak(z3, z3);
-  Hacl_K256_Field_fmul_small_num(bzz3, zz, (uint64_t)21U);
+  Hacl_K256_Field_fmul_small_num(bzz3, zz, 21ULL);
   Hacl_K256_Field_fnormalize_weak(bzz3, bzz3);
-  Hacl_K256_Field_fmul_small_num(bzz9, bzz3, (uint64_t)3U);
-  Hacl_K256_Field_fsub(bzz9, yy, bzz9, (uint64_t)6U);
+  Hacl_K256_Field_fmul_small_num(bzz9, bzz3, 3ULL);
+  Hacl_K256_Field_fsub(bzz9, yy, bzz9, 6ULL);
   Hacl_K256_Field_fadd(tmp1, yy, bzz3);
   Hacl_K256_Field_fmul(tmp1, bzz9, tmp1);
   Hacl_K256_Field_fmul(y3, yy, zz);
   Hacl_K256_Field_fmul(x3, x3, bzz9);
-  Hacl_K256_Field_fmul_small_num(y3, y3, (uint64_t)168U);
+  Hacl_K256_Field_fmul_small_num(y3, y3, 168ULL);
   Hacl_K256_Field_fadd(y3, tmp1, y3);
   Hacl_K256_Field_fnormalize_weak(y3, y3);
 }
@@ -974,23 +958,23 @@ void Hacl_Impl_K256_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q)
 {
   uint64_t tmp[45U] = { 0U };
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y1 = p + 5U;
+  uint64_t *z1 = p + 10U;
   uint64_t *x2 = q;
-  uint64_t *y2 = q + (uint32_t)5U;
-  uint64_t *z2 = q + (uint32_t)10U;
+  uint64_t *y2 = q + 5U;
+  uint64_t *z2 = q + 10U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
   uint64_t *xx = tmp;
-  uint64_t *yy = tmp + (uint32_t)5U;
-  uint64_t *zz = tmp + (uint32_t)10U;
-  uint64_t *xy_pairs = tmp + (uint32_t)15U;
-  uint64_t *yz_pairs = tmp + (uint32_t)20U;
-  uint64_t *xz_pairs = tmp + (uint32_t)25U;
-  uint64_t *yy_m_bzz3 = tmp + (uint32_t)30U;
-  uint64_t *yy_p_bzz3 = tmp + (uint32_t)35U;
-  uint64_t *tmp1 = tmp + (uint32_t)40U;
+  uint64_t *yy = tmp + 5U;
+  uint64_t *zz = tmp + 10U;
+  uint64_t *xy_pairs = tmp + 15U;
+  uint64_t *yz_pairs = tmp + 20U;
+  uint64_t *xz_pairs = tmp + 25U;
+  uint64_t *yy_m_bzz3 = tmp + 30U;
+  uint64_t *yy_p_bzz3 = tmp + 35U;
+  uint64_t *tmp1 = tmp + 40U;
   Hacl_K256_Field_fmul(xx, x1, x2);
   Hacl_K256_Field_fmul(yy, y1, y2);
   Hacl_K256_Field_fmul(zz, z1, z2);
@@ -998,29 +982,29 @@ void Hacl_Impl_K256_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q)
   Hacl_K256_Field_fadd(tmp1, x2, y2);
   Hacl_K256_Field_fmul(xy_pairs, xy_pairs, tmp1);
   Hacl_K256_Field_fadd(tmp1, xx, yy);
-  Hacl_K256_Field_fsub(xy_pairs, xy_pairs, tmp1, (uint64_t)4U);
+  Hacl_K256_Field_fsub(xy_pairs, xy_pairs, tmp1, 4ULL);
   Hacl_K256_Field_fadd(yz_pairs, y1, z1);
   Hacl_K256_Field_fadd(tmp1, y2, z2);
   Hacl_K256_Field_fmul(yz_pairs, yz_pairs, tmp1);
   Hacl_K256_Field_fadd(tmp1, yy, zz);
-  Hacl_K256_Field_fsub(yz_pairs, yz_pairs, tmp1, (uint64_t)4U);
+  Hacl_K256_Field_fsub(yz_pairs, yz_pairs, tmp1, 4ULL);
   Hacl_K256_Field_fadd(xz_pairs, x1, z1);
   Hacl_K256_Field_fadd(tmp1, x2, z2);
   Hacl_K256_Field_fmul(xz_pairs, xz_pairs, tmp1);
   Hacl_K256_Field_fadd(tmp1, xx, zz);
-  Hacl_K256_Field_fsub(xz_pairs, xz_pairs, tmp1, (uint64_t)4U);
-  Hacl_K256_Field_fmul_small_num(tmp1, zz, (uint64_t)21U);
+  Hacl_K256_Field_fsub(xz_pairs, xz_pairs, tmp1, 4ULL);
+  Hacl_K256_Field_fmul_small_num(tmp1, zz, 21ULL);
   Hacl_K256_Field_fnormalize_weak(tmp1, tmp1);
-  Hacl_K256_Field_fsub(yy_m_bzz3, yy, tmp1, (uint64_t)2U);
+  Hacl_K256_Field_fsub(yy_m_bzz3, yy, tmp1, 2ULL);
   Hacl_K256_Field_fadd(yy_p_bzz3, yy, tmp1);
-  Hacl_K256_Field_fmul_small_num(x3, yz_pairs, (uint64_t)21U);
+  Hacl_K256_Field_fmul_small_num(x3, yz_pairs, 21ULL);
   Hacl_K256_Field_fnormalize_weak(x3, x3);
-  Hacl_K256_Field_fmul_small_num(z3, xx, (uint64_t)3U);
-  Hacl_K256_Field_fmul_small_num(y3, z3, (uint64_t)21U);
+  Hacl_K256_Field_fmul_small_num(z3, xx, 3ULL);
+  Hacl_K256_Field_fmul_small_num(y3, z3, 21ULL);
   Hacl_K256_Field_fnormalize_weak(y3, y3);
   Hacl_K256_Field_fmul(tmp1, xy_pairs, yy_m_bzz3);
   Hacl_K256_Field_fmul(x3, x3, xz_pairs);
-  Hacl_K256_Field_fsub(x3, tmp1, x3, (uint64_t)2U);
+  Hacl_K256_Field_fsub(x3, tmp1, x3, 2ULL);
   Hacl_K256_Field_fnormalize_weak(x3, x3);
   Hacl_K256_Field_fmul(tmp1, yy_p_bzz3, yy_m_bzz3);
   Hacl_K256_Field_fmul(y3, y3, xz_pairs);
@@ -1036,30 +1020,30 @@ static inline void scalar_split_lambda(uint64_t *r1, uint64_t *r2, uint64_t *k)
 {
   uint64_t tmp1[4U] = { 0U };
   uint64_t tmp2[4U] = { 0U };
-  tmp1[0U] = (uint64_t)0xe893209a45dbb031U;
-  tmp1[1U] = (uint64_t)0x3daa8a1471e8ca7fU;
-  tmp1[2U] = (uint64_t)0xe86c90e49284eb15U;
-  tmp1[3U] = (uint64_t)0x3086d221a7d46bcdU;
-  tmp2[0U] = (uint64_t)0x1571b4ae8ac47f71U;
-  tmp2[1U] = (uint64_t)0x221208ac9df506c6U;
-  tmp2[2U] = (uint64_t)0x6f547fa90abfe4c4U;
-  tmp2[3U] = (uint64_t)0xe4437ed6010e8828U;
+  tmp1[0U] = 0xe893209a45dbb031ULL;
+  tmp1[1U] = 0x3daa8a1471e8ca7fULL;
+  tmp1[2U] = 0xe86c90e49284eb15ULL;
+  tmp1[3U] = 0x3086d221a7d46bcdULL;
+  tmp2[0U] = 0x1571b4ae8ac47f71ULL;
+  tmp2[1U] = 0x221208ac9df506c6ULL;
+  tmp2[2U] = 0x6f547fa90abfe4c4ULL;
+  tmp2[3U] = 0xe4437ed6010e8828ULL;
   qmul_shift_384(r1, k, tmp1);
   qmul_shift_384(r2, k, tmp2);
-  tmp1[0U] = (uint64_t)0x6f547fa90abfe4c3U;
-  tmp1[1U] = (uint64_t)0xe4437ed6010e8828U;
-  tmp1[2U] = (uint64_t)0x0U;
-  tmp1[3U] = (uint64_t)0x0U;
-  tmp2[0U] = (uint64_t)0xd765cda83db1562cU;
-  tmp2[1U] = (uint64_t)0x8a280ac50774346dU;
-  tmp2[2U] = (uint64_t)0xfffffffffffffffeU;
-  tmp2[3U] = (uint64_t)0xffffffffffffffffU;
+  tmp1[0U] = 0x6f547fa90abfe4c3ULL;
+  tmp1[1U] = 0xe4437ed6010e8828ULL;
+  tmp1[2U] = 0x0ULL;
+  tmp1[3U] = 0x0ULL;
+  tmp2[0U] = 0xd765cda83db1562cULL;
+  tmp2[1U] = 0x8a280ac50774346dULL;
+  tmp2[2U] = 0xfffffffffffffffeULL;
+  tmp2[3U] = 0xffffffffffffffffULL;
   qmul(r1, r1, tmp1);
   qmul(r2, r2, tmp2);
-  tmp1[0U] = (uint64_t)0xe0cfc810b51283cfU;
-  tmp1[1U] = (uint64_t)0xa880b9fc8ec739c2U;
-  tmp1[2U] = (uint64_t)0x5ad9e3fd77ed9ba4U;
-  tmp1[3U] = (uint64_t)0xac9c52b33fa3cf1fU;
+  tmp1[0U] = 0xe0cfc810b51283cfULL;
+  tmp1[1U] = 0xa880b9fc8ec739c2ULL;
+  tmp1[2U] = 0x5ad9e3fd77ed9ba4ULL;
+  tmp1[3U] = 0xac9c52b33fa3cf1fULL;
   qadd(r2, r1, r2);
   qmul(tmp2, r2, tmp1);
   qadd(r1, k, tmp2);
@@ -1068,17 +1052,17 @@ static inline void scalar_split_lambda(uint64_t *r1, uint64_t *r2, uint64_t *k)
 static inline void point_mul_lambda(uint64_t *res, uint64_t *p)
 {
   uint64_t *rx = res;
-  uint64_t *ry = res + (uint32_t)5U;
-  uint64_t *rz = res + (uint32_t)10U;
+  uint64_t *ry = res + 5U;
+  uint64_t *rz = res + 10U;
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
   uint64_t beta[5U] = { 0U };
-  beta[0U] = (uint64_t)0x96c28719501eeU;
-  beta[1U] = (uint64_t)0x7512f58995c13U;
-  beta[2U] = (uint64_t)0xc3434e99cf049U;
-  beta[3U] = (uint64_t)0x7106e64479eaU;
-  beta[4U] = (uint64_t)0x7ae96a2b657cU;
+  beta[0U] = 0x96c28719501eeULL;
+  beta[1U] = 0x7512f58995c13ULL;
+  beta[2U] = 0xc3434e99cf049ULL;
+  beta[3U] = 0x7106e64479eaULL;
+  beta[4U] = 0x7ae96a2b657cULL;
   Hacl_K256_Field_fmul(rx, beta, px);
   ry[0U] = py[0U];
   ry[1U] = py[1U];
@@ -1096,11 +1080,11 @@ static inline void point_mul_lambda_inplace(uint64_t *res)
 {
   uint64_t *rx = res;
   uint64_t beta[5U] = { 0U };
-  beta[0U] = (uint64_t)0x96c28719501eeU;
-  beta[1U] = (uint64_t)0x7512f58995c13U;
-  beta[2U] = (uint64_t)0xc3434e99cf049U;
-  beta[3U] = (uint64_t)0x7106e64479eaU;
-  beta[4U] = (uint64_t)0x7ae96a2b657cU;
+  beta[0U] = 0x96c28719501eeULL;
+  beta[1U] = 0x7512f58995c13ULL;
+  beta[2U] = 0xc3434e99cf049ULL;
+  beta[3U] = 0x7106e64479eaULL;
+  beta[4U] = 0x7ae96a2b657cULL;
   Hacl_K256_Field_fmul(rx, beta, rx);
 }
 
@@ -1123,7 +1107,7 @@ ecmult_endo_split(
 {
   scalar_split_lambda(r1, r2, scalar);
   point_mul_lambda(q2, q);
-  memcpy(q1, q, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(q1, q, 15U * sizeof (uint64_t));
   bool b0 = is_qelem_le_q_halved_vartime(r1);
   qnegate_conditional_vartime(r1, !b0);
   point_negate_conditional_vartime(q1, !b0);
@@ -1140,45 +1124,37 @@ void Hacl_Impl_K256_PointMul_point_mul(uint64_t *out, uint64_t *scalar, uint64_t
   uint64_t table[240U] = { 0U };
   uint64_t tmp[15U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)15U;
+  uint64_t *t1 = table + 15U;
   Hacl_Impl_K256_Point_make_point_at_inf(t0);
-  memcpy(t1, q, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(t1, q, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 15U;
     Hacl_Impl_K256_PointDouble_point_double(tmp, t11);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U;
+    memcpy(table + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 15U;
     Hacl_Impl_K256_PointAdd_point_add(tmp, q, t2);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t)););
+    memcpy(table + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t)););
   Hacl_Impl_K256_Point_make_point_at_inf(out);
   uint64_t tmp0[15U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)15U * sizeof (uint64_t));
+    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 256U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 15U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)15U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 15U;
       KRML_MAYBE_FOR15(i,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
+        0U,
+        15U,
+        1U,
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
@@ -1188,17 +1164,17 @@ void Hacl_Impl_K256_PointMul_point_mul(uint64_t *out, uint64_t *scalar, uint64_t
 
 static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
 {
-  memcpy(tmp, (uint64_t *)table, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(tmp, (uint64_t *)table, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i0,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
-    const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    15U,
+    1U,
+    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + 1U));
+    const uint64_t *res_j = table + (i0 + 1U) * 15U;
     KRML_MAYBE_FOR15(i,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
+      0U,
+      15U,
+      1U,
       uint64_t *os = tmp;
       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
       os[i] = x;););
@@ -1208,79 +1184,72 @@ static inline void point_mul_g(uint64_t *out, uint64_t *scalar)
 {
   uint64_t q1[15U] = { 0U };
   uint64_t *gx = q1;
-  uint64_t *gy = q1 + (uint32_t)5U;
-  uint64_t *gz = q1 + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = q1 + 5U;
+  uint64_t *gz = q1 + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
   uint64_t
   q2[15U] =
     {
-      (uint64_t)4496295042185355U, (uint64_t)3125448202219451U, (uint64_t)1239608518490046U,
-      (uint64_t)2687445637493112U, (uint64_t)77979604880139U, (uint64_t)3360310474215011U,
-      (uint64_t)1216410458165163U, (uint64_t)177901593587973U, (uint64_t)3209978938104985U,
-      (uint64_t)118285133003718U, (uint64_t)434519962075150U, (uint64_t)1114612377498854U,
-      (uint64_t)3488596944003813U, (uint64_t)450716531072892U, (uint64_t)66044973203836U
+      4496295042185355ULL, 3125448202219451ULL, 1239608518490046ULL, 2687445637493112ULL,
+      77979604880139ULL, 3360310474215011ULL, 1216410458165163ULL, 177901593587973ULL,
+      3209978938104985ULL, 118285133003718ULL, 434519962075150ULL, 1114612377498854ULL,
+      3488596944003813ULL, 450716531072892ULL, 66044973203836ULL
     };
-  KRML_HOST_IGNORE(q2);
+  KRML_MAYBE_UNUSED_VAR(q2);
   uint64_t
   q3[15U] =
     {
-      (uint64_t)1277614565900951U, (uint64_t)378671684419493U, (uint64_t)3176260448102880U,
-      (uint64_t)1575691435565077U, (uint64_t)167304528382180U, (uint64_t)2600787765776588U,
-      (uint64_t)7497946149293U, (uint64_t)2184272641272202U, (uint64_t)2200235265236628U,
-      (uint64_t)265969268774814U, (uint64_t)1913228635640715U, (uint64_t)2831959046949342U,
-      (uint64_t)888030405442963U, (uint64_t)1817092932985033U, (uint64_t)101515844997121U
+      1277614565900951ULL, 378671684419493ULL, 3176260448102880ULL, 1575691435565077ULL,
+      167304528382180ULL, 2600787765776588ULL, 7497946149293ULL, 2184272641272202ULL,
+      2200235265236628ULL, 265969268774814ULL, 1913228635640715ULL, 2831959046949342ULL,
+      888030405442963ULL, 1817092932985033ULL, 101515844997121ULL
     };
-  KRML_HOST_IGNORE(q3);
+  KRML_MAYBE_UNUSED_VAR(q3);
   uint64_t
   q4[15U] =
     {
-      (uint64_t)34056422761564U, (uint64_t)3315864838337811U, (uint64_t)3797032336888745U,
-      (uint64_t)2580641850480806U, (uint64_t)208048944042500U, (uint64_t)1233795288689421U,
-      (uint64_t)1048795233382631U, (uint64_t)646545158071530U, (uint64_t)1816025742137285U,
-      (uint64_t)12245672982162U, (uint64_t)2119364213800870U, (uint64_t)2034960311715107U,
-      (uint64_t)3172697815804487U, (uint64_t)4185144850224160U, (uint64_t)2792055915674U
+      34056422761564ULL, 3315864838337811ULL, 3797032336888745ULL, 2580641850480806ULL,
+      208048944042500ULL, 1233795288689421ULL, 1048795233382631ULL, 646545158071530ULL,
+      1816025742137285ULL, 12245672982162ULL, 2119364213800870ULL, 2034960311715107ULL,
+      3172697815804487ULL, 4185144850224160ULL, 2792055915674ULL
     };
-  KRML_HOST_IGNORE(q4);
+  KRML_MAYBE_UNUSED_VAR(q4);
   uint64_t *r1 = scalar;
-  uint64_t *r2 = scalar + (uint32_t)1U;
-  uint64_t *r3 = scalar + (uint32_t)2U;
-  uint64_t *r4 = scalar + (uint32_t)3U;
+  uint64_t *r2 = scalar + 1U;
+  uint64_t *r3 = scalar + 2U;
+  uint64_t *r4 = scalar + 3U;
   Hacl_Impl_K256_Point_make_point_at_inf(out);
   uint64_t tmp[15U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
+    0U,
+    16U,
+    1U,
+    KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 64U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);
-    uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
+    uint32_t k0 = 64U - 4U * i - 4U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);
-    uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
+    uint32_t k1 = 64U - 4U * i - 4U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);
-    uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
+    uint32_t k2 = 64U - 4U * i - 4U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp););
 }
@@ -1290,75 +1259,65 @@ point_mul_g_double_vartime(uint64_t *out, uint64_t *scalar1, uint64_t *scalar2,
 {
   uint64_t q1[15U] = { 0U };
   uint64_t *gx = q1;
-  uint64_t *gy = q1 + (uint32_t)5U;
-  uint64_t *gz = q1 + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = q1 + 5U;
+  uint64_t *gz = q1 + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
   uint64_t table2[480U] = { 0U };
   uint64_t tmp[15U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)15U;
+  uint64_t *t1 = table2 + 15U;
   Hacl_Impl_K256_Point_make_point_at_inf(t0);
-  memcpy(t1, q2, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(t1, q2, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 15U;
     Hacl_Impl_K256_PointDouble_point_double(tmp, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U;
+    memcpy(table2 + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 15U;
     Hacl_Impl_K256_PointAdd_point_add(tmp, q2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t)););
   uint64_t tmp0[15U] = { 0U };
-  uint32_t i0 = (uint32_t)255U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, i0, (uint32_t)5U);
+  uint32_t i0 = 255U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
-  const
-  uint64_t
-  *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)15U;
-  memcpy(out, (uint64_t *)a_bits_l, (uint32_t)15U * sizeof (uint64_t));
-  uint32_t i1 = (uint32_t)255U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, i1, (uint32_t)5U);
+  const uint64_t *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 15U;
+  memcpy(out, (uint64_t *)a_bits_l, 15U * sizeof (uint64_t));
+  uint32_t i1 = 255U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
-  const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)15U;
-  memcpy(tmp0, (uint64_t *)a_bits_l0, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l0 = table2 + bits_l320 * 15U;
+  memcpy(tmp0, (uint64_t *)a_bits_l0, 15U * sizeof (uint64_t));
   Hacl_Impl_K256_PointAdd_point_add(out, out, tmp0);
   uint64_t tmp1[15U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++)
+  for (uint32_t i = 0U; i < 51U; i++)
   {
-    KRML_MAYBE_FOR5(i2,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 255U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, k, 5U);
     uint32_t bits_l321 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)15U;
-    memcpy(tmp1, (uint64_t *)a_bits_l1, (uint32_t)15U * sizeof (uint64_t));
+    const uint64_t *a_bits_l1 = table2 + bits_l321 * 15U;
+    memcpy(tmp1, (uint64_t *)a_bits_l1, 15U * sizeof (uint64_t));
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1);
-    uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, k0, (uint32_t)5U);
+    uint32_t k0 = 255U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, k0, 5U);
     uint32_t bits_l322 = (uint32_t)bits_l0;
     const
     uint64_t
-    *a_bits_l2 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)15U;
-    memcpy(tmp1, (uint64_t *)a_bits_l2, (uint32_t)15U * sizeof (uint64_t));
+    *a_bits_l2 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 15U;
+    memcpy(tmp1, (uint64_t *)a_bits_l2, 15U * sizeof (uint64_t));
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1);
   }
 }
@@ -1380,99 +1339,89 @@ point_mul_g_double_split_lambda_table(
   uint64_t table2[480U] = { 0U };
   uint64_t tmp[15U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)15U;
+  uint64_t *t1 = table2 + 15U;
   Hacl_Impl_K256_Point_make_point_at_inf(t0);
-  memcpy(t1, p2, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(t1, p2, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 15U;
     Hacl_Impl_K256_PointDouble_point_double(tmp, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U;
+    memcpy(table2 + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 15U;
     Hacl_Impl_K256_PointAdd_point_add(tmp, p2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t)););
   uint64_t tmp0[15U] = { 0U };
   uint64_t tmp1[15U] = { 0U };
-  uint32_t i0 = (uint32_t)125U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r1, i0, (uint32_t)5U);
+  uint32_t i0 = 125U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
-  const
-  uint64_t
-  *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)15U;
-  memcpy(out, (uint64_t *)a_bits_l, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 15U;
+  memcpy(out, (uint64_t *)a_bits_l, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(out, is_negate1);
-  uint32_t i1 = (uint32_t)125U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r2, i1, (uint32_t)5U);
+  uint32_t i1 = 125U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
   const
   uint64_t
-  *a_bits_l0 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l320 * (uint32_t)15U;
-  memcpy(tmp1, (uint64_t *)a_bits_l0, (uint32_t)15U * sizeof (uint64_t));
+  *a_bits_l0 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l320 * 15U;
+  memcpy(tmp1, (uint64_t *)a_bits_l0, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(tmp1, is_negate2);
   point_mul_lambda_inplace(tmp1);
   Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1);
   uint64_t tmp10[15U] = { 0U };
-  uint32_t i2 = (uint32_t)125U;
-  uint64_t bits_c1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r3, i2, (uint32_t)5U);
+  uint32_t i2 = 125U;
+  uint64_t bits_c1 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r3, i2, 5U);
   uint32_t bits_l321 = (uint32_t)bits_c1;
-  const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)15U;
-  memcpy(tmp0, (uint64_t *)a_bits_l1, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l1 = table2 + bits_l321 * 15U;
+  memcpy(tmp0, (uint64_t *)a_bits_l1, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(tmp0, is_negate3);
-  uint32_t i3 = (uint32_t)125U;
-  uint64_t bits_c2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r4, i3, (uint32_t)5U);
+  uint32_t i3 = 125U;
+  uint64_t bits_c2 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r4, i3, 5U);
   uint32_t bits_l322 = (uint32_t)bits_c2;
-  const uint64_t *a_bits_l2 = table2 + bits_l322 * (uint32_t)15U;
-  memcpy(tmp10, (uint64_t *)a_bits_l2, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l2 = table2 + bits_l322 * 15U;
+  memcpy(tmp10, (uint64_t *)a_bits_l2, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(tmp10, is_negate4);
   point_mul_lambda_inplace(tmp10);
   Hacl_Impl_K256_PointAdd_point_add(tmp0, tmp0, tmp10);
   Hacl_Impl_K256_PointAdd_point_add(out, out, tmp0);
   uint64_t tmp2[15U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+  for (uint32_t i = 0U; i < 25U; i++)
   {
-    KRML_MAYBE_FOR5(i4,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r4, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i4, 0U, 5U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 125U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r4, k, 5U);
     uint32_t bits_l323 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l3 = table2 + bits_l323 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l3, (uint32_t)15U * sizeof (uint64_t));
+    const uint64_t *a_bits_l3 = table2 + bits_l323 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l3, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate4);
     point_mul_lambda_inplace(tmp2);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
-    uint32_t k0 = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r3, k0, (uint32_t)5U);
+    uint32_t k0 = 125U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r3, k0, 5U);
     uint32_t bits_l324 = (uint32_t)bits_l0;
-    const uint64_t *a_bits_l4 = table2 + bits_l324 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l4, (uint32_t)15U * sizeof (uint64_t));
+    const uint64_t *a_bits_l4 = table2 + bits_l324 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l4, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate3);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
-    uint32_t k1 = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r2, k1, (uint32_t)5U);
+    uint32_t k1 = 125U - 5U * i - 5U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r2, k1, 5U);
     uint32_t bits_l325 = (uint32_t)bits_l1;
     const
     uint64_t
-    *a_bits_l5 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l325 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l5, (uint32_t)15U * sizeof (uint64_t));
+    *a_bits_l5 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l325 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l5, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate2);
     point_mul_lambda_inplace(tmp2);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
-    uint32_t k2 = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r1, k2, (uint32_t)5U);
+    uint32_t k2 = 125U - 5U * i - 5U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r1, k2, 5U);
     uint32_t bits_l326 = (uint32_t)bits_l2;
     const
     uint64_t
-    *a_bits_l6 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l326 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l6, (uint32_t)15U * sizeof (uint64_t));
+    *a_bits_l6 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l326 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l6, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate1);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
   }
@@ -1483,16 +1432,16 @@ check_ecmult_endo_split(uint64_t *r1, uint64_t *r2, uint64_t *r3, uint64_t *r4)
 {
   uint64_t f20 = r1[2U];
   uint64_t f30 = r1[3U];
-  bool b1 = f20 == (uint64_t)0U && f30 == (uint64_t)0U;
+  bool b1 = f20 == 0ULL && f30 == 0ULL;
   uint64_t f21 = r2[2U];
   uint64_t f31 = r2[3U];
-  bool b2 = f21 == (uint64_t)0U && f31 == (uint64_t)0U;
+  bool b2 = f21 == 0ULL && f31 == 0ULL;
   uint64_t f22 = r3[2U];
   uint64_t f32 = r3[3U];
-  bool b3 = f22 == (uint64_t)0U && f32 == (uint64_t)0U;
+  bool b3 = f22 == 0ULL && f32 == 0ULL;
   uint64_t f2 = r4[2U];
   uint64_t f3 = r4[3U];
-  bool b4 = f2 == (uint64_t)0U && f3 == (uint64_t)0U;
+  bool b4 = f2 == 0ULL && f3 == 0ULL;
   return b1 && b2 && b3 && b4;
 }
 
@@ -1515,30 +1464,30 @@ point_mul_g_double_split_lambda_vartime(
 {
   uint64_t g[15U] = { 0U };
   uint64_t *gx = g;
-  uint64_t *gy = g + (uint32_t)5U;
-  uint64_t *gz = g + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = g + 5U;
+  uint64_t *gz = g + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
   uint64_t r1234[16U] = { 0U };
   uint64_t q1234[60U] = { 0U };
   uint64_t *r1 = r1234;
-  uint64_t *r2 = r1234 + (uint32_t)4U;
-  uint64_t *r3 = r1234 + (uint32_t)8U;
-  uint64_t *r4 = r1234 + (uint32_t)12U;
+  uint64_t *r2 = r1234 + 4U;
+  uint64_t *r3 = r1234 + 8U;
+  uint64_t *r4 = r1234 + 12U;
   uint64_t *q1 = q1234;
-  uint64_t *q2 = q1234 + (uint32_t)15U;
-  uint64_t *q3 = q1234 + (uint32_t)30U;
-  uint64_t *q4 = q1234 + (uint32_t)45U;
+  uint64_t *q2 = q1234 + 15U;
+  uint64_t *q3 = q1234 + 30U;
+  uint64_t *q4 = q1234 + 45U;
   __bool_bool scrut0 = ecmult_endo_split(r1, r2, q1, q2, scalar1, g);
   bool is_high10 = scrut0.fst;
   bool is_high20 = scrut0.snd;
@@ -1615,30 +1564,30 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(
   uint8_t *nonce
 )
 {
-  uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
-  KRML_HOST_IGNORE(oneq);
+  uint64_t oneq[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
+  KRML_MAYBE_UNUSED_VAR(oneq);
   uint64_t rsdk_q[16U] = { 0U };
   uint64_t *r_q = rsdk_q;
-  uint64_t *s_q = rsdk_q + (uint32_t)4U;
-  uint64_t *d_a = rsdk_q + (uint32_t)8U;
-  uint64_t *k_q = rsdk_q + (uint32_t)12U;
+  uint64_t *s_q = rsdk_q + 4U;
+  uint64_t *d_a = rsdk_q + 8U;
+  uint64_t *k_q = rsdk_q + 12U;
   uint64_t is_b_valid0 = load_qelem_check(d_a, private_key);
-  uint64_t oneq10[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq10[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = d_a;
     uint64_t uu____0 = oneq10[i];
     uint64_t x = uu____0 ^ (is_b_valid0 & (d_a[i] ^ uu____0));
     os[i] = x;);
   uint64_t is_sk_valid = is_b_valid0;
   uint64_t is_b_valid = load_qelem_check(k_q, nonce);
-  uint64_t oneq1[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq1[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = k_q;
     uint64_t uu____1 = oneq1[i];
     uint64_t x = uu____1 ^ (is_b_valid & (k_q[i] ^ uu____1));
@@ -1660,11 +1609,11 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(
   qadd(s_q, z, s_q);
   qmul(s_q, kinv, s_q);
   store_qelem(signature, r_q);
-  store_qelem(signature + (uint32_t)32U, s_q);
+  store_qelem(signature + 32U, s_q);
   uint64_t is_r_zero = is_qelem_zero(r_q);
   uint64_t is_s_zero = is_qelem_zero(s_q);
   uint64_t m = are_sk_nonce_valid & (~is_r_zero & ~is_s_zero);
-  bool res = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool res = m == 0xFFFFFFFFFFFFFFFFULL;
   return res;
 }
 
@@ -1713,14 +1662,14 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
 {
   uint64_t tmp[35U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *r_q = tmp + (uint32_t)15U;
-  uint64_t *s_q = tmp + (uint32_t)19U;
-  uint64_t *u1 = tmp + (uint32_t)23U;
-  uint64_t *u2 = tmp + (uint32_t)27U;
-  uint64_t *m_q = tmp + (uint32_t)31U;
+  uint64_t *r_q = tmp + 15U;
+  uint64_t *s_q = tmp + 19U;
+  uint64_t *u1 = tmp + 23U;
+  uint64_t *u2 = tmp + 27U;
+  uint64_t *m_q = tmp + 31U;
   bool is_pk_valid = load_point_vartime(pk, public_key);
   bool is_r_valid = load_qelem_vartime(r_q, signature);
-  bool is_s_valid = load_qelem_vartime(s_q, signature + (uint32_t)32U);
+  bool is_s_valid = load_qelem_vartime(s_q, signature + 32U);
   bool is_rs_valid = is_r_valid && is_s_valid;
   load_qelem_modq(m_q, m);
   if (!(is_pk_valid && is_rs_valid))
@@ -1734,7 +1683,7 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
   uint64_t res[15U] = { 0U };
   point_mul_g_double_split_lambda_vartime(res, u1, u2, pk);
   uint64_t tmp1[5U] = { 0U };
-  uint64_t *pz = res + (uint32_t)10U;
+  uint64_t *pz = res + 10U;
   Hacl_K256_Field_fnormalize(tmp1, pz);
   bool b = Hacl_K256_Field_is_felem_zero_vartime(tmp1);
   if (b)
@@ -1742,7 +1691,7 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
     return false;
   }
   uint64_t *x = res;
-  uint64_t *z = res + (uint32_t)10U;
+  uint64_t *z = res + 10U;
   uint8_t r_bytes[32U] = { 0U };
   uint64_t r_fe[5U] = { 0U };
   uint64_t tmp_q[5U] = { 0U };
@@ -1756,11 +1705,11 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
     bool is_r_lt_p_m_q = Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime(r_fe);
     if (is_r_lt_p_m_q)
     {
-      tmp_q[0U] = (uint64_t)0x25e8cd0364141U;
-      tmp_q[1U] = (uint64_t)0xe6af48a03bbfdU;
-      tmp_q[2U] = (uint64_t)0xffffffebaaedcU;
-      tmp_q[3U] = (uint64_t)0xfffffffffffffU;
-      tmp_q[4U] = (uint64_t)0xffffffffffffU;
+      tmp_q[0U] = 0x25e8cd0364141ULL;
+      tmp_q[1U] = 0xe6af48a03bbfdULL;
+      tmp_q[2U] = 0xffffffebaaedcULL;
+      tmp_q[3U] = 0xfffffffffffffULL;
+      tmp_q[4U] = 0xffffffffffffULL;
       Hacl_K256_Field_fadd(tmp_q, r_fe, tmp_q);
       return fmul_eq_vartime(tmp_q, z, tmp_x);
     }
@@ -1805,7 +1754,7 @@ Compute canonical lowest S value for `signature` (R || S).
 bool Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(uint8_t *signature)
 {
   uint64_t s_q[4U] = { 0U };
-  uint8_t *s = signature + (uint32_t)32U;
+  uint8_t *s = signature + 32U;
   bool is_sk_valid = load_qelem_vartime(s_q, s);
   if (!is_sk_valid)
   {
@@ -1813,7 +1762,7 @@ bool Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(uint8_t *signature)
   }
   bool is_sk_lt_q_halved = is_qelem_le_q_halved_vartime(s_q);
   qnegate_conditional_vartime(s_q, !is_sk_lt_q_halved);
-  store_qelem(signature + (uint32_t)32U, s_q);
+  store_qelem(signature + 32U, s_q);
   return true;
 }
 
@@ -1827,7 +1776,7 @@ Check whether `signature` (R || S) is in canonical form.
 bool Hacl_K256_ECDSA_secp256k1_ecdsa_is_signature_normalized(uint8_t *signature)
 {
   uint64_t s_q[4U] = { 0U };
-  uint8_t *s = signature + (uint32_t)32U;
+  uint8_t *s = signature + 32U;
   bool is_s_valid = load_qelem_vartime(s_q, s);
   bool is_s_lt_q_halved = is_qelem_le_q_halved_vartime(s_q);
   return is_s_valid && is_s_lt_q_halved;
@@ -1971,11 +1920,11 @@ Convert a public key from uncompressed to its raw form.
 bool Hacl_K256_ECDSA_public_key_uncompressed_to_raw(uint8_t *pk_raw, uint8_t *pk)
 {
   uint8_t pk0 = pk[0U];
-  if (pk0 != (uint8_t)0x04U)
+  if (pk0 != 0x04U)
   {
     return false;
   }
-  memcpy(pk_raw, pk + (uint32_t)1U, (uint32_t)64U * sizeof (uint8_t));
+  memcpy(pk_raw, pk + 1U, 64U * sizeof (uint8_t));
   return true;
 }
 
@@ -1989,8 +1938,8 @@ Convert a public key from raw to its uncompressed form.
 */
 void Hacl_K256_ECDSA_public_key_uncompressed_from_raw(uint8_t *pk, uint8_t *pk_raw)
 {
-  pk[0U] = (uint8_t)0x04U;
-  memcpy(pk + (uint32_t)1U, pk_raw, (uint32_t)64U * sizeof (uint8_t));
+  pk[0U] = 0x04U;
+  memcpy(pk + 1U, pk_raw, 64U * sizeof (uint8_t));
 }
 
 /**
@@ -2007,12 +1956,12 @@ bool Hacl_K256_ECDSA_public_key_compressed_to_raw(uint8_t *pk_raw, uint8_t *pk)
 {
   uint64_t xa[5U] = { 0U };
   uint64_t ya[5U] = { 0U };
-  uint8_t *pk_xb = pk + (uint32_t)1U;
+  uint8_t *pk_xb = pk + 1U;
   bool b = aff_point_decompress_vartime(xa, ya, pk);
   if (b)
   {
-    memcpy(pk_raw, pk_xb, (uint32_t)32U * sizeof (uint8_t));
-    Hacl_K256_Field_store_felem(pk_raw + (uint32_t)32U, ya);
+    memcpy(pk_raw, pk_xb, 32U * sizeof (uint8_t));
+    Hacl_K256_Field_store_felem(pk_raw + 32U, ya);
   }
   return b;
 }
@@ -2028,20 +1977,20 @@ Convert a public key from raw to its compressed form.
 void Hacl_K256_ECDSA_public_key_compressed_from_raw(uint8_t *pk, uint8_t *pk_raw)
 {
   uint8_t *pk_x = pk_raw;
-  uint8_t *pk_y = pk_raw + (uint32_t)32U;
+  uint8_t *pk_y = pk_raw + 32U;
   uint8_t x0 = pk_y[31U];
-  bool is_pk_y_odd = (x0 & (uint8_t)1U) == (uint8_t)1U;
+  bool is_pk_y_odd = ((uint32_t)x0 & 1U) == 1U;
   uint8_t ite;
   if (is_pk_y_odd)
   {
-    ite = (uint8_t)0x03U;
+    ite = 0x03U;
   }
   else
   {
-    ite = (uint8_t)0x02U;
+    ite = 0x02U;
   }
   pk[0U] = ite;
-  memcpy(pk + (uint32_t)1U, pk_x, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(pk + 1U, pk_x, 32U * sizeof (uint8_t));
 }
 
 
@@ -2084,7 +2033,7 @@ bool Hacl_K256_ECDSA_is_private_key_valid(uint8_t *private_key)
 {
   uint64_t s_q[4U] = { 0U };
   uint64_t res = load_qelem_check(s_q, private_key);
-  return res == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return res == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -2107,13 +2056,13 @@ bool Hacl_K256_ECDSA_secret_to_public(uint8_t *public_key, uint8_t *private_key)
 {
   uint64_t tmp[19U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *sk = tmp + (uint32_t)15U;
+  uint64_t *sk = tmp + 15U;
   uint64_t is_b_valid = load_qelem_check(sk, private_key);
-  uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -2121,7 +2070,7 @@ bool Hacl_K256_ECDSA_secret_to_public(uint8_t *public_key, uint8_t *private_key)
   uint64_t is_sk_valid = is_b_valid;
   point_mul_g(pk, sk);
   Hacl_Impl_K256_Point_point_store(public_key, pk);
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -2140,15 +2089,15 @@ bool Hacl_K256_ECDSA_ecdh(uint8_t *shared_secret, uint8_t *their_pubkey, uint8_t
 {
   uint64_t tmp[34U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *ss = tmp + (uint32_t)15U;
-  uint64_t *sk = tmp + (uint32_t)30U;
+  uint64_t *ss = tmp + 15U;
+  uint64_t *sk = tmp + 30U;
   bool is_pk_valid = load_point_vartime(pk, their_pubkey);
   uint64_t is_b_valid = load_qelem_check(sk, private_key);
-  uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -2159,6 +2108,6 @@ bool Hacl_K256_ECDSA_ecdh(uint8_t *shared_secret, uint8_t *their_pubkey, uint8_t
     Hacl_Impl_K256_PointMul_point_mul(ss, sk, pk);
     Hacl_Impl_K256_Point_point_store(shared_secret, ss);
   }
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU && is_pk_valid;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL && is_pk_valid;
 }
 
diff --git a/src/msvc/Hacl_NaCl.c b/src/msvc/Hacl_NaCl.c
index 37104040..8a64c531 100644
--- a/src/msvc/Hacl_NaCl.c
+++ b/src/msvc/Hacl_NaCl.c
@@ -30,9 +30,9 @@
 static void secretbox_init(uint8_t *xkeys, uint8_t *k, uint8_t *n)
 {
   uint8_t *subkey = xkeys;
-  uint8_t *aekey = xkeys + (uint32_t)32U;
+  uint8_t *aekey = xkeys + 32U;
   uint8_t *n0 = n;
-  uint8_t *n1 = n + (uint32_t)16U;
+  uint8_t *n1 = n + 16U;
   Hacl_Salsa20_hsalsa20(subkey, k, n0);
   Hacl_Salsa20_salsa20_key_block0(aekey, subkey, n1);
 }
@@ -42,34 +42,34 @@ secretbox_detached(uint32_t mlen, uint8_t *c, uint8_t *tag, uint8_t *k, uint8_t
 {
   uint8_t xkeys[96U] = { 0U };
   secretbox_init(xkeys, k, n);
-  uint8_t *mkey = xkeys + (uint32_t)32U;
-  uint8_t *n1 = n + (uint32_t)16U;
+  uint8_t *mkey = xkeys + 32U;
+  uint8_t *n1 = n + 16U;
   uint8_t *subkey = xkeys;
-  uint8_t *ekey0 = xkeys + (uint32_t)64U;
+  uint8_t *ekey0 = xkeys + 64U;
   uint32_t mlen0;
-  if (mlen <= (uint32_t)32U)
+  if (mlen <= 32U)
   {
     mlen0 = mlen;
   }
   else
   {
-    mlen0 = (uint32_t)32U;
+    mlen0 = 32U;
   }
   uint32_t mlen1 = mlen - mlen0;
   uint8_t *m0 = m;
   uint8_t *m1 = m + mlen0;
   uint8_t block0[32U] = { 0U };
   memcpy(block0, m0, mlen0 * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = block0;
-    uint8_t x = block0[i] ^ ekey0[i];
+    uint8_t x = (uint32_t)block0[i] ^ (uint32_t)ekey0[i];
     os[i] = x;
   }
   uint8_t *c0 = c;
   uint8_t *c1 = c + mlen0;
   memcpy(c0, block0, mlen0 * sizeof (uint8_t));
-  Hacl_Salsa20_salsa20_encrypt(mlen1, c1, m1, subkey, n1, (uint32_t)1U);
+  Hacl_Salsa20_salsa20_encrypt(mlen1, c1, m1, subkey, n1, 1U);
   Hacl_Poly1305_32_poly1305_mac(tag, mlen, c, mkey);
 }
 
@@ -85,55 +85,55 @@ secretbox_open_detached(
 {
   uint8_t xkeys[96U] = { 0U };
   secretbox_init(xkeys, k, n);
-  uint8_t *mkey = xkeys + (uint32_t)32U;
+  uint8_t *mkey = xkeys + 32U;
   uint8_t tag_[16U] = { 0U };
   Hacl_Poly1305_32_poly1305_mac(tag_, mlen, c, mkey);
-  uint8_t res = (uint8_t)255U;
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t uu____0 = FStar_UInt8_eq_mask(tag[i], tag_[i]);
-    res = uu____0 & res;);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
     uint8_t *subkey = xkeys;
-    uint8_t *ekey0 = xkeys + (uint32_t)64U;
-    uint8_t *n1 = n + (uint32_t)16U;
+    uint8_t *ekey0 = xkeys + 64U;
+    uint8_t *n1 = n + 16U;
     uint32_t mlen0;
-    if (mlen <= (uint32_t)32U)
+    if (mlen <= 32U)
     {
       mlen0 = mlen;
     }
     else
     {
-      mlen0 = (uint32_t)32U;
+      mlen0 = 32U;
     }
     uint32_t mlen1 = mlen - mlen0;
     uint8_t *c0 = c;
     uint8_t *c1 = c + mlen0;
     uint8_t block0[32U] = { 0U };
     memcpy(block0, c0, mlen0 * sizeof (uint8_t));
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t *os = block0;
-      uint8_t x = block0[i] ^ ekey0[i];
+      uint8_t x = (uint32_t)block0[i] ^ (uint32_t)ekey0[i];
       os[i] = x;
     }
     uint8_t *m0 = m;
     uint8_t *m1 = m + mlen0;
     memcpy(m0, block0, mlen0 * sizeof (uint8_t));
-    Hacl_Salsa20_salsa20_decrypt(mlen1, m1, c1, subkey, n1, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Salsa20_salsa20_decrypt(mlen1, m1, c1, subkey, n1, 1U);
+    return 0U;
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static void secretbox_easy(uint32_t mlen, uint8_t *c, uint8_t *k, uint8_t *n, uint8_t *m)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   secretbox_detached(mlen, cip, tag, k, n, m);
 }
 
@@ -141,7 +141,7 @@ static uint32_t
 secretbox_open_easy(uint32_t mlen, uint8_t *m, uint8_t *k, uint8_t *n, uint8_t *c)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   return secretbox_open_detached(mlen, m, k, n, cip, tag);
 }
 
@@ -152,9 +152,9 @@ static inline uint32_t box_beforenm(uint8_t *k, uint8_t *pk, uint8_t *sk)
   if (r)
   {
     Hacl_Salsa20_hsalsa20(k, k, n0);
-    return (uint32_t)0U;
+    return 0U;
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static inline uint32_t
@@ -168,7 +168,7 @@ box_detached_afternm(
 )
 {
   secretbox_detached(mlen, c, tag, k, n, m);
-  return (uint32_t)0U;
+  return 0U;
 }
 
 static inline uint32_t
@@ -184,11 +184,11 @@ box_detached(
 {
   uint8_t k[32U] = { 0U };
   uint32_t r = box_beforenm(k, pk, sk);
-  if (r == (uint32_t)0U)
+  if (r == 0U)
   {
     return box_detached_afternm(mlen, c, tag, k, n, m);
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static inline uint32_t
@@ -217,18 +217,18 @@ box_open_detached(
 {
   uint8_t k[32U] = { 0U };
   uint32_t r = box_beforenm(k, pk, sk);
-  if (r == (uint32_t)0U)
+  if (r == 0U)
   {
     return box_open_detached_afternm(mlen, m, k, n, c, tag);
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static inline uint32_t
 box_easy_afternm(uint32_t mlen, uint8_t *c, uint8_t *k, uint8_t *n, uint8_t *m)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   uint32_t res = box_detached_afternm(mlen, cip, tag, k, n, m);
   return res;
 }
@@ -237,7 +237,7 @@ static inline uint32_t
 box_easy(uint32_t mlen, uint8_t *c, uint8_t *sk, uint8_t *pk, uint8_t *n, uint8_t *m)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   uint32_t res = box_detached(mlen, cip, tag, sk, pk, n, m);
   return res;
 }
@@ -246,7 +246,7 @@ static inline uint32_t
 box_open_easy_afternm(uint32_t mlen, uint8_t *m, uint8_t *k, uint8_t *n, uint8_t *c)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   return box_open_detached_afternm(mlen, m, k, n, cip, tag);
 }
 
@@ -254,7 +254,7 @@ static inline uint32_t
 box_open_easy(uint32_t mlen, uint8_t *m, uint8_t *pk, uint8_t *sk, uint8_t *n, uint8_t *c)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   return box_open_detached(mlen, m, pk, sk, n, cip, tag);
 }
 
@@ -281,7 +281,7 @@ Hacl_NaCl_crypto_secretbox_detached(
 )
 {
   secretbox_detached(mlen, c, tag, k, n, m);
-  return (uint32_t)0U;
+  return 0U;
 }
 
 /**
@@ -322,7 +322,7 @@ uint32_t
 Hacl_NaCl_crypto_secretbox_easy(uint8_t *c, uint8_t *m, uint32_t mlen, uint8_t *n, uint8_t *k)
 {
   secretbox_easy(mlen, c, k, n, m);
-  return (uint32_t)0U;
+  return 0U;
 }
 
 /**
@@ -343,7 +343,7 @@ Hacl_NaCl_crypto_secretbox_open_easy(
   uint8_t *k
 )
 {
-  return secretbox_open_easy(clen - (uint32_t)16U, m, k, n, c);
+  return secretbox_open_easy(clen - 16U, m, k, n, c);
 }
 
 /**
@@ -490,7 +490,7 @@ Hacl_NaCl_crypto_box_open_easy_afternm(
   uint8_t *k
 )
 {
-  return box_open_easy_afternm(clen - (uint32_t)16U, m, k, n, c);
+  return box_open_easy_afternm(clen - 16U, m, k, n, c);
 }
 
 /**
@@ -513,6 +513,6 @@ Hacl_NaCl_crypto_box_open_easy(
   uint8_t *sk
 )
 {
-  return box_open_easy(clen - (uint32_t)16U, m, pk, sk, n, c);
+  return box_open_easy(clen - 16U, m, pk, sk, n, c);
 }
 
diff --git a/src/msvc/Hacl_P256.c b/src/msvc/Hacl_P256.c
index 7e586e54..ed09716d 100644
--- a/src/msvc/Hacl_P256.c
+++ b/src/msvc/Hacl_P256.c
@@ -33,11 +33,11 @@
 static inline uint64_t bn_is_zero_mask4(uint64_t *f)
 {
   uint64_t bn_zero[4U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(f[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
@@ -48,16 +48,16 @@ static inline uint64_t bn_is_zero_mask4(uint64_t *f)
 static inline bool bn_is_zero_vartime4(uint64_t *f)
 {
   uint64_t m = bn_is_zero_mask4(f);
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static inline uint64_t bn_is_eq_mask4(uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
@@ -67,16 +67,16 @@ static inline uint64_t bn_is_eq_mask4(uint64_t *a, uint64_t *b)
 static inline bool bn_is_eq_vartime4(uint64_t *a, uint64_t *b)
 {
   uint64_t m = bn_is_eq_mask4(a, b);
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static inline void bn_cmovznz4(uint64_t *res, uint64_t cin, uint64_t *x, uint64_t *y)
 {
-  uint64_t mask = ~FStar_UInt64_eq_mask(cin, (uint64_t)0U);
+  uint64_t mask = ~FStar_UInt64_eq_mask(cin, 0ULL);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t uu____0 = x[i];
     uint64_t x1 = uu____0 ^ (mask & (y[i] ^ uu____0));
@@ -85,52 +85,52 @@ static inline void bn_cmovznz4(uint64_t *res, uint64_t cin, uint64_t *x, uint64_
 
 static inline void bn_add_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t *y)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = x[4U * 0U];
+    uint64_t t20 = y[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = x[4U * 0U + 1U];
+    uint64_t t21 = y[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = x[4U * 0U + 2U];
+    uint64_t t22 = y[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = x[4U * 0U + 3U];
+    uint64_t t2 = y[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x1;);
@@ -138,23 +138,23 @@ static inline void bn_add_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t
 
 static inline uint64_t bn_sub4(uint64_t *res, uint64_t *x, uint64_t *y)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = x[4U * 0U];
+    uint64_t t20 = y[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = x[4U * 0U + 1U];
+    uint64_t t21 = y[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = x[4U * 0U + 2U];
+    uint64_t t22 = y[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = x[4U * 0U + 3U];
+    uint64_t t2 = y[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c0 = c;
@@ -163,53 +163,53 @@ static inline uint64_t bn_sub4(uint64_t *res, uint64_t *x, uint64_t *y)
 
 static inline void bn_sub_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t *y)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = x[4U * 0U];
+    uint64_t t20 = y[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = x[4U * 0U + 1U];
+    uint64_t t21 = y[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = x[4U * 0U + 2U];
+    uint64_t t22 = y[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = x[4U * 0U + 3U];
+    uint64_t t2 = y[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x1;);
@@ -217,59 +217,59 @@ static inline void bn_sub_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t
 
 static inline void bn_mul4(uint64_t *res, uint64_t *x, uint64_t *y)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = y[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = x[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = x[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = x[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = x[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = x[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
     uint64_t r = c;
-    res[(uint32_t)4U + i0] = r;);
+    res[4U + i0] = r;);
 }
 
 static inline void bn_sqr4(uint64_t *res, uint64_t *x)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = x;
     uint64_t a_j = x[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -277,41 +277,37 @@ static inline void bn_sqr4(uint64_t *res, uint64_t *x)
     }
     uint64_t r = c;
     res[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(x[i], x[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void bn_to_bytes_be4(uint8_t *res, uint64_t *f)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(res + i * (uint32_t)8U, f[(uint32_t)4U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(res + i * 8U, f[4U - i - 1U]););
 }
 
 static inline void bn_from_bytes_be4(uint64_t *res, uint8_t *b)
 {
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
 }
@@ -319,79 +315,79 @@ static inline void bn_from_bytes_be4(uint64_t *res, uint8_t *b)
 static inline void bn2_to_bytes_be4(uint8_t *res, uint64_t *x, uint64_t *y)
 {
   bn_to_bytes_be4(res, x);
-  bn_to_bytes_be4(res + (uint32_t)32U, y);
+  bn_to_bytes_be4(res + 32U, y);
 }
 
 static inline void make_prime(uint64_t *n)
 {
-  n[0U] = (uint64_t)0xffffffffffffffffU;
-  n[1U] = (uint64_t)0xffffffffU;
-  n[2U] = (uint64_t)0x0U;
-  n[3U] = (uint64_t)0xffffffff00000001U;
+  n[0U] = 0xffffffffffffffffULL;
+  n[1U] = 0xffffffffULL;
+  n[2U] = 0x0ULL;
+  n[3U] = 0xffffffff00000001ULL;
 }
 
 static inline void make_order(uint64_t *n)
 {
-  n[0U] = (uint64_t)0xf3b9cac2fc632551U;
-  n[1U] = (uint64_t)0xbce6faada7179e84U;
-  n[2U] = (uint64_t)0xffffffffffffffffU;
-  n[3U] = (uint64_t)0xffffffff00000000U;
+  n[0U] = 0xf3b9cac2fc632551ULL;
+  n[1U] = 0xbce6faada7179e84ULL;
+  n[2U] = 0xffffffffffffffffULL;
+  n[3U] = 0xffffffff00000000ULL;
 }
 
 static inline void make_a_coeff(uint64_t *a)
 {
-  a[0U] = (uint64_t)0xfffffffffffffffcU;
-  a[1U] = (uint64_t)0x3ffffffffU;
-  a[2U] = (uint64_t)0x0U;
-  a[3U] = (uint64_t)0xfffffffc00000004U;
+  a[0U] = 0xfffffffffffffffcULL;
+  a[1U] = 0x3ffffffffULL;
+  a[2U] = 0x0ULL;
+  a[3U] = 0xfffffffc00000004ULL;
 }
 
 static inline void make_b_coeff(uint64_t *b)
 {
-  b[0U] = (uint64_t)0xd89cdf6229c4bddfU;
-  b[1U] = (uint64_t)0xacf005cd78843090U;
-  b[2U] = (uint64_t)0xe5a220abf7212ed6U;
-  b[3U] = (uint64_t)0xdc30061d04874834U;
+  b[0U] = 0xd89cdf6229c4bddfULL;
+  b[1U] = 0xacf005cd78843090ULL;
+  b[2U] = 0xe5a220abf7212ed6ULL;
+  b[3U] = 0xdc30061d04874834ULL;
 }
 
 static inline void make_g_x(uint64_t *n)
 {
-  n[0U] = (uint64_t)0x79e730d418a9143cU;
-  n[1U] = (uint64_t)0x75ba95fc5fedb601U;
-  n[2U] = (uint64_t)0x79fb732b77622510U;
-  n[3U] = (uint64_t)0x18905f76a53755c6U;
+  n[0U] = 0x79e730d418a9143cULL;
+  n[1U] = 0x75ba95fc5fedb601ULL;
+  n[2U] = 0x79fb732b77622510ULL;
+  n[3U] = 0x18905f76a53755c6ULL;
 }
 
 static inline void make_g_y(uint64_t *n)
 {
-  n[0U] = (uint64_t)0xddf25357ce95560aU;
-  n[1U] = (uint64_t)0x8b4ab8e4ba19e45cU;
-  n[2U] = (uint64_t)0xd2e88688dd21f325U;
-  n[3U] = (uint64_t)0x8571ff1825885d85U;
+  n[0U] = 0xddf25357ce95560aULL;
+  n[1U] = 0x8b4ab8e4ba19e45cULL;
+  n[2U] = 0xd2e88688dd21f325ULL;
+  n[3U] = 0x8571ff1825885d85ULL;
 }
 
 static inline void make_fmont_R2(uint64_t *n)
 {
-  n[0U] = (uint64_t)0x3U;
-  n[1U] = (uint64_t)0xfffffffbffffffffU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0x4fffffffdU;
+  n[0U] = 0x3ULL;
+  n[1U] = 0xfffffffbffffffffULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0x4fffffffdULL;
 }
 
 static inline void make_fzero(uint64_t *n)
 {
-  n[0U] = (uint64_t)0U;
-  n[1U] = (uint64_t)0U;
-  n[2U] = (uint64_t)0U;
-  n[3U] = (uint64_t)0U;
+  n[0U] = 0ULL;
+  n[1U] = 0ULL;
+  n[2U] = 0ULL;
+  n[3U] = 0ULL;
 }
 
 static inline void make_fone(uint64_t *n)
 {
-  n[0U] = (uint64_t)0x1U;
-  n[1U] = (uint64_t)0xffffffff00000000U;
-  n[2U] = (uint64_t)0xffffffffffffffffU;
-  n[3U] = (uint64_t)0xfffffffeU;
+  n[0U] = 0x1ULL;
+  n[1U] = 0xffffffff00000000ULL;
+  n[2U] = 0xffffffffffffffffULL;
+  n[3U] = 0xfffffffeULL;
 }
 
 static inline uint64_t bn_is_lt_prime_mask4(uint64_t *f)
@@ -399,7 +395,7 @@ static inline uint64_t bn_is_lt_prime_mask4(uint64_t *f)
   uint64_t tmp[4U] = { 0U };
   make_prime(tmp);
   uint64_t c = bn_sub4(tmp, f, tmp);
-  return (uint64_t)0U - c;
+  return 0ULL - c;
 }
 
 static inline uint64_t feq_mask(uint64_t *a, uint64_t *b)
@@ -435,61 +431,61 @@ static inline void mont_reduction(uint64_t *res, uint64_t *x)
 {
   uint64_t n[4U] = { 0U };
   make_prime(n);
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t qj = (uint64_t)1U * x[i0];
+    0U,
+    4U,
+    1U,
+    uint64_t qj = 1ULL * x[i0];
     uint64_t *res_j0 = x + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
     }
     uint64_t r = c;
     uint64_t c1 = r;
-    uint64_t *resb = x + (uint32_t)4U + i0;
-    uint64_t res_j = x[(uint32_t)4U + i0];
+    uint64_t *resb = x + 4U + i0;
+    uint64_t res_j = x[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););
-  memcpy(res, x + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, x + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x1;);
@@ -512,7 +508,7 @@ static inline void fsqr0(uint64_t *res, uint64_t *x)
 static inline void from_mont(uint64_t *res, uint64_t *a)
 {
   uint64_t tmp[8U] = { 0U };
-  memcpy(tmp, a, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, a, 4U * sizeof (uint64_t));
   mont_reduction(res, tmp);
 }
 
@@ -540,105 +536,105 @@ static inline void finv(uint64_t *res, uint64_t *a)
 {
   uint64_t tmp[16U] = { 0U };
   uint64_t *x30 = tmp;
-  uint64_t *x2 = tmp + (uint32_t)4U;
-  uint64_t *tmp1 = tmp + (uint32_t)8U;
-  uint64_t *tmp2 = tmp + (uint32_t)12U;
-  memcpy(x2, a, (uint32_t)4U * sizeof (uint64_t));
+  uint64_t *x2 = tmp + 4U;
+  uint64_t *tmp1 = tmp + 8U;
+  uint64_t *tmp2 = tmp + 12U;
+  memcpy(x2, a, 4U * sizeof (uint64_t));
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, a);
-  memcpy(x30, x2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x30, x2, 4U * sizeof (uint64_t));
   {
     fsqr0(x30, x30);
   }
   fmul0(x30, x30, a);
-  memcpy(tmp1, x30, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, x30, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, x30);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, fsqr0(tmp2, tmp2););
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, fsqr0(tmp2, tmp2););
   fmul0(tmp2, tmp2, tmp1);
-  memcpy(tmp1, tmp2, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, tmp2, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, x30);
-  memcpy(x30, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR15(i, (uint32_t)0U, (uint32_t)15U, (uint32_t)1U, fsqr0(x30, x30););
+  memcpy(x30, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR15(i, 0U, 15U, 1U, fsqr0(x30, x30););
   fmul0(x30, x30, tmp1);
-  memcpy(tmp1, x30, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, x30, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, x2);
-  memcpy(x2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  memcpy(x2, tmp1, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, a);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, tmp1);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, tmp1);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)30U; i++)
+  for (uint32_t i = 0U; i < 30U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, x30);
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(x2, x2););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(x2, x2););
   fmul0(tmp1, x2, a);
-  memcpy(res, tmp1, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, tmp1, 4U * sizeof (uint64_t));
 }
 
 static inline void fsqrt(uint64_t *res, uint64_t *a)
 {
   uint64_t tmp[8U] = { 0U };
   uint64_t *tmp1 = tmp;
-  uint64_t *tmp2 = tmp + (uint32_t)4U;
-  memcpy(tmp1, a, (uint32_t)4U * sizeof (uint64_t));
+  uint64_t *tmp2 = tmp + 4U;
+  memcpy(tmp1, a, 4U * sizeof (uint64_t));
   {
     fsqr0(tmp1, tmp1);
   }
   fmul0(tmp1, tmp1, a);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(tmp2, tmp2););
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(tmp2, tmp2););
   fmul0(tmp2, tmp2, tmp1);
-  memcpy(tmp1, tmp2, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, tmp2, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, tmp2);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, fsqr0(tmp2, tmp2););
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, fsqr0(tmp2, tmp2););
   fmul0(tmp2, tmp2, tmp1);
-  memcpy(tmp1, tmp2, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR16(i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, tmp2, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, tmp2);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     fsqr0(tmp2, tmp2);
   }
   fmul0(tmp2, tmp2, a);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)96U; i++)
+  for (uint32_t i = 0U; i < 96U; i++)
   {
     fsqr0(tmp2, tmp2);
   }
   fmul0(tmp2, tmp2, a);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)94U; i++)
+  for (uint32_t i = 0U; i < 94U; i++)
   {
     fsqr0(tmp2, tmp2);
   }
-  memcpy(res, tmp2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, tmp2, 4U * sizeof (uint64_t));
 }
 
 static inline void make_base_point(uint64_t *p)
 {
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)4U;
-  uint64_t *z = p + (uint32_t)8U;
+  uint64_t *y = p + 4U;
+  uint64_t *z = p + 8U;
   make_g_x(x);
   make_g_y(y);
   make_fone(z);
@@ -647,8 +643,8 @@ static inline void make_base_point(uint64_t *p)
 static inline void make_point_at_inf(uint64_t *p)
 {
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)4U;
-  uint64_t *z = p + (uint32_t)8U;
+  uint64_t *y = p + 4U;
+  uint64_t *z = p + 8U;
   make_fzero(x);
   make_fone(y);
   make_fzero(z);
@@ -656,7 +652,7 @@ static inline void make_point_at_inf(uint64_t *p)
 
 static inline bool is_point_at_inf_vartime(uint64_t *p)
 {
-  uint64_t *pz = p + (uint32_t)8U;
+  uint64_t *pz = p + 8U;
   return bn_is_zero_vartime4(pz);
 }
 
@@ -664,10 +660,10 @@ static inline void to_aff_point(uint64_t *res, uint64_t *p)
 {
   uint64_t zinv[4U] = { 0U };
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
-  uint64_t *pz = p + (uint32_t)8U;
+  uint64_t *py = p + 4U;
+  uint64_t *pz = p + 8U;
   uint64_t *x = res;
-  uint64_t *y = res + (uint32_t)4U;
+  uint64_t *y = res + 4U;
   finv(zinv, pz);
   fmul0(x, px, zinv);
   fmul0(y, py, zinv);
@@ -679,7 +675,7 @@ static inline void to_aff_point_x(uint64_t *res, uint64_t *p)
 {
   uint64_t zinv[4U] = { 0U };
   uint64_t *px = p;
-  uint64_t *pz = p + (uint32_t)8U;
+  uint64_t *pz = p + 8U;
   finv(zinv, pz);
   fmul0(res, px, zinv);
   from_mont(res, res);
@@ -688,10 +684,10 @@ static inline void to_aff_point_x(uint64_t *res, uint64_t *p)
 static inline void to_proj_point(uint64_t *res, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   uint64_t *rx = res;
-  uint64_t *ry = res + (uint32_t)4U;
-  uint64_t *rz = res + (uint32_t)8U;
+  uint64_t *ry = res + 4U;
+  uint64_t *rz = res + 8U;
   to_mont(rx, px);
   to_mont(ry, py);
   make_fone(rz);
@@ -703,7 +699,7 @@ static inline bool is_on_curve_vartime(uint64_t *p)
   uint64_t tx[4U] = { 0U };
   uint64_t ty[4U] = { 0U };
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   to_mont(tx, px);
   to_mont(ty, py);
   uint64_t tmp[4U] = { 0U };
@@ -715,14 +711,14 @@ static inline bool is_on_curve_vartime(uint64_t *p)
   fadd0(rp, tmp, rp);
   fsqr0(ty, ty);
   uint64_t r = feq_mask(ty, rp);
-  bool r0 = r == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool r0 = r == 0xFFFFFFFFFFFFFFFFULL;
   return r0;
 }
 
 static inline void aff_point_store(uint8_t *res, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   bn2_to_bytes_be4(res, px, py);
 }
 
@@ -736,17 +732,17 @@ static inline void point_store(uint8_t *res, uint64_t *p)
 static inline bool aff_point_load_vartime(uint64_t *p, uint8_t *b)
 {
   uint8_t *p_x = b;
-  uint8_t *p_y = b + (uint32_t)32U;
+  uint8_t *p_y = b + 32U;
   uint64_t *bn_p_x = p;
-  uint64_t *bn_p_y = p + (uint32_t)4U;
+  uint64_t *bn_p_y = p + 4U;
   bn_from_bytes_be4(bn_p_x, p_x);
   bn_from_bytes_be4(bn_p_y, p_y);
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   uint64_t lessX = bn_is_lt_prime_mask4(px);
   uint64_t lessY = bn_is_lt_prime_mask4(py);
   uint64_t res = lessX & lessY;
-  bool is_xy_valid = res == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool is_xy_valid = res == 0xFFFFFFFFFFFFFFFFULL;
   if (!is_xy_valid)
   {
     return false;
@@ -769,15 +765,15 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
 {
   uint8_t s0 = s[0U];
   uint8_t s01 = s0;
-  if (!(s01 == (uint8_t)0x02U || s01 == (uint8_t)0x03U))
+  if (!(s01 == 0x02U || s01 == 0x03U))
   {
     return false;
   }
-  uint8_t *xb = s + (uint32_t)1U;
+  uint8_t *xb = s + 1U;
   bn_from_bytes_be4(x, xb);
   uint64_t is_x_valid = bn_is_lt_prime_mask4(x);
-  bool is_x_valid1 = is_x_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  bool is_y_odd = s01 == (uint8_t)0x03U;
+  bool is_x_valid1 = is_x_valid == 0xFFFFFFFFFFFFFFFFULL;
+  bool is_y_odd = s01 == 0x03U;
   if (!is_x_valid1)
   {
     return false;
@@ -797,14 +793,14 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
   from_mont(y, yM);
   fsqr0(yM, yM);
   uint64_t r = feq_mask(yM, y2M);
-  bool is_y_valid = r == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool is_y_valid = r == 0xFFFFFFFFFFFFFFFFULL;
   bool is_y_valid0 = is_y_valid;
   if (!is_y_valid0)
   {
     return false;
   }
-  uint64_t is_y_odd1 = y[0U] & (uint64_t)1U;
-  bool is_y_odd2 = is_y_odd1 == (uint64_t)1U;
+  uint64_t is_y_odd1 = y[0U] & 1ULL;
+  bool is_y_odd2 = is_y_odd1 == 1ULL;
   fnegate_conditional_vartime(y, is_y_odd2 != is_y_odd);
   return true;
 }
@@ -813,18 +809,18 @@ static inline void point_double(uint64_t *res, uint64_t *p)
 {
   uint64_t tmp[20U] = { 0U };
   uint64_t *x = p;
-  uint64_t *z = p + (uint32_t)8U;
+  uint64_t *z = p + 8U;
   uint64_t *x3 = res;
-  uint64_t *y3 = res + (uint32_t)4U;
-  uint64_t *z3 = res + (uint32_t)8U;
+  uint64_t *y3 = res + 4U;
+  uint64_t *z3 = res + 8U;
   uint64_t *t0 = tmp;
-  uint64_t *t1 = tmp + (uint32_t)4U;
-  uint64_t *t2 = tmp + (uint32_t)8U;
-  uint64_t *t3 = tmp + (uint32_t)12U;
-  uint64_t *t4 = tmp + (uint32_t)16U;
+  uint64_t *t1 = tmp + 4U;
+  uint64_t *t2 = tmp + 8U;
+  uint64_t *t3 = tmp + 12U;
+  uint64_t *t4 = tmp + 16U;
   uint64_t *x1 = p;
-  uint64_t *y = p + (uint32_t)4U;
-  uint64_t *z1 = p + (uint32_t)8U;
+  uint64_t *y = p + 4U;
+  uint64_t *z1 = p + 8U;
   fsqr0(t0, x1);
   fsqr0(t1, y);
   fsqr0(t2, z1);
@@ -865,22 +861,22 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
 {
   uint64_t tmp[36U] = { 0U };
   uint64_t *t0 = tmp;
-  uint64_t *t1 = tmp + (uint32_t)24U;
+  uint64_t *t1 = tmp + 24U;
   uint64_t *x3 = t1;
-  uint64_t *y3 = t1 + (uint32_t)4U;
-  uint64_t *z3 = t1 + (uint32_t)8U;
+  uint64_t *y3 = t1 + 4U;
+  uint64_t *z3 = t1 + 8U;
   uint64_t *t01 = t0;
-  uint64_t *t11 = t0 + (uint32_t)4U;
-  uint64_t *t2 = t0 + (uint32_t)8U;
-  uint64_t *t3 = t0 + (uint32_t)12U;
-  uint64_t *t4 = t0 + (uint32_t)16U;
-  uint64_t *t5 = t0 + (uint32_t)20U;
+  uint64_t *t11 = t0 + 4U;
+  uint64_t *t2 = t0 + 8U;
+  uint64_t *t3 = t0 + 12U;
+  uint64_t *t4 = t0 + 16U;
+  uint64_t *t5 = t0 + 20U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)4U;
-  uint64_t *z10 = p + (uint32_t)8U;
+  uint64_t *y1 = p + 4U;
+  uint64_t *z10 = p + 8U;
   uint64_t *x20 = q;
-  uint64_t *y20 = q + (uint32_t)4U;
-  uint64_t *z20 = q + (uint32_t)8U;
+  uint64_t *y20 = q + 4U;
+  uint64_t *z20 = q + 8U;
   fmul0(t01, x1, x20);
   fmul0(t11, y1, y20);
   fmul0(t2, z10, z20);
@@ -888,10 +884,10 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
   fadd0(t4, x20, y20);
   fmul0(t3, t3, t4);
   fadd0(t4, t01, t11);
-  uint64_t *y10 = p + (uint32_t)4U;
-  uint64_t *z11 = p + (uint32_t)8U;
-  uint64_t *y2 = q + (uint32_t)4U;
-  uint64_t *z21 = q + (uint32_t)8U;
+  uint64_t *y10 = p + 4U;
+  uint64_t *z11 = p + 8U;
+  uint64_t *y2 = q + 4U;
+  uint64_t *z21 = q + 8U;
   fsub0(t3, t3, t4);
   fadd0(t4, y10, z11);
   fadd0(t5, y2, z21);
@@ -899,9 +895,9 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
   fadd0(t5, t11, t2);
   fsub0(t4, t4, t5);
   uint64_t *x10 = p;
-  uint64_t *z1 = p + (uint32_t)8U;
+  uint64_t *z1 = p + 8U;
   uint64_t *x2 = q;
-  uint64_t *z2 = q + (uint32_t)8U;
+  uint64_t *z2 = q + 8U;
   fadd0(x3, x10, z1);
   fadd0(y3, x2, z2);
   fmul0(x3, x3, y3);
@@ -932,7 +928,7 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
   fmul0(z3, t4, z3);
   fmul0(t11, t3, t01);
   fadd0(z3, z3, t11);
-  memcpy(res, t1, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(res, t1, 12U * sizeof (uint64_t));
 }
 
 static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)
@@ -940,41 +936,37 @@ static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)
   uint64_t table[192U] = { 0U };
   uint64_t tmp[12U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)12U;
+  uint64_t *t1 = table + 12U;
   make_point_at_inf(t0);
-  memcpy(t1, p, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(t1, p, 12U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)12U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 12U;
     point_double(tmp, t11);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U;
+    memcpy(table + (2U * i + 2U) * 12U, tmp, 12U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 12U;
     point_add(tmp, p, t2);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t)););
+    memcpy(table + (2U * i + 3U) * 12U, tmp, 12U * sizeof (uint64_t)););
   make_point_at_inf(res);
   uint64_t tmp0[12U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
-    KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, point_double(res, res););
-    uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)12U * sizeof (uint64_t));
+    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, point_double(res, res););
+    uint32_t k = 256U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 12U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)12U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 12U;
       KRML_MAYBE_FOR12(i,
-        (uint32_t)0U,
-        (uint32_t)12U,
-        (uint32_t)1U,
+        0U,
+        12U,
+        1U,
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
@@ -984,17 +976,17 @@ static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)
 
 static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
 {
-  memcpy(tmp, (uint64_t *)table, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(tmp, (uint64_t *)table, 12U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i0,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
-    const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)12U;
+    0U,
+    15U,
+    1U,
+    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + 1U));
+    const uint64_t *res_j = table + (i0 + 1U) * 12U;
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint64_t *os = tmp;
       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
       os[i] = x;););
@@ -1007,64 +999,58 @@ static inline void point_mul_g(uint64_t *res, uint64_t *scalar)
   uint64_t
   q2[12U] =
     {
-      (uint64_t)1499621593102562565U, (uint64_t)16692369783039433128U,
-      (uint64_t)15337520135922861848U, (uint64_t)5455737214495366228U,
-      (uint64_t)17827017231032529600U, (uint64_t)12413621606240782649U,
-      (uint64_t)2290483008028286132U, (uint64_t)15752017553340844820U,
-      (uint64_t)4846430910634234874U, (uint64_t)10861682798464583253U,
-      (uint64_t)15404737222404363049U, (uint64_t)363586619281562022U
+      1499621593102562565ULL, 16692369783039433128ULL, 15337520135922861848ULL,
+      5455737214495366228ULL, 17827017231032529600ULL, 12413621606240782649ULL,
+      2290483008028286132ULL, 15752017553340844820ULL, 4846430910634234874ULL,
+      10861682798464583253ULL, 15404737222404363049ULL, 363586619281562022ULL
     };
   uint64_t
   q3[12U] =
     {
-      (uint64_t)14619254753077084366U, (uint64_t)13913835116514008593U,
-      (uint64_t)15060744674088488145U, (uint64_t)17668414598203068685U,
-      (uint64_t)10761169236902342334U, (uint64_t)15467027479157446221U,
-      (uint64_t)14989185522423469618U, (uint64_t)14354539272510107003U,
-      (uint64_t)14298211796392133693U, (uint64_t)13270323784253711450U,
-      (uint64_t)13380964971965046957U, (uint64_t)8686204248456909699U
+      14619254753077084366ULL, 13913835116514008593ULL, 15060744674088488145ULL,
+      17668414598203068685ULL, 10761169236902342334ULL, 15467027479157446221ULL,
+      14989185522423469618ULL, 14354539272510107003ULL, 14298211796392133693ULL,
+      13270323784253711450ULL, 13380964971965046957ULL, 8686204248456909699ULL
     };
   uint64_t
   q4[12U] =
     {
-      (uint64_t)7870395003430845958U, (uint64_t)18001862936410067720U,
-      (uint64_t)8006461232116967215U, (uint64_t)5921313779532424762U,
-      (uint64_t)10702113371959864307U, (uint64_t)8070517410642379879U,
-      (uint64_t)7139806720777708306U, (uint64_t)8253938546650739833U,
-      (uint64_t)17490482834545705718U, (uint64_t)1065249776797037500U,
-      (uint64_t)5018258455937968775U, (uint64_t)14100621120178668337U
+      7870395003430845958ULL, 18001862936410067720ULL, 8006461232116967215ULL,
+      5921313779532424762ULL, 10702113371959864307ULL, 8070517410642379879ULL,
+      7139806720777708306ULL, 8253938546650739833ULL, 17490482834545705718ULL,
+      1065249776797037500ULL, 5018258455937968775ULL, 14100621120178668337ULL
     };
   uint64_t *r1 = scalar;
-  uint64_t *r2 = scalar + (uint32_t)1U;
-  uint64_t *r3 = scalar + (uint32_t)2U;
-  uint64_t *r4 = scalar + (uint32_t)3U;
+  uint64_t *r2 = scalar + 1U;
+  uint64_t *r3 = scalar + 2U;
+  uint64_t *r4 = scalar + 3U;
   make_point_at_inf(res);
   uint64_t tmp[12U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    KRML_MAYBE_FOR4(i0, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, point_double(res, res););
-    uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
+    0U,
+    16U,
+    1U,
+    KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, point_double(res, res););
+    uint32_t k = 64U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
     point_add(res, res, tmp);
-    uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
+    uint32_t k0 = 64U - 4U * i - 4U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
     point_add(res, res, tmp);
-    uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
+    uint32_t k1 = 64U - 4U * i - 4U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
     point_add(res, res, tmp);
-    uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
+    uint32_t k2 = 64U - 4U * i - 4U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
     point_add(res, res, tmp););
-  KRML_HOST_IGNORE(q1);
-  KRML_HOST_IGNORE(q2);
-  KRML_HOST_IGNORE(q3);
-  KRML_HOST_IGNORE(q4);
+  KRML_MAYBE_UNUSED_VAR(q1);
+  KRML_MAYBE_UNUSED_VAR(q2);
+  KRML_MAYBE_UNUSED_VAR(q3);
+  KRML_MAYBE_UNUSED_VAR(q4);
 }
 
 static inline void
@@ -1075,54 +1061,48 @@ point_mul_double_g(uint64_t *res, uint64_t *scalar1, uint64_t *scalar2, uint64_t
   uint64_t table2[384U] = { 0U };
   uint64_t tmp[12U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)12U;
+  uint64_t *t1 = table2 + 12U;
   make_point_at_inf(t0);
-  memcpy(t1, q2, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(t1, q2, 12U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)12U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 12U;
     point_double(tmp, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U;
+    memcpy(table2 + (2U * i + 2U) * 12U, tmp, 12U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 12U;
     point_add(tmp, q2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 12U, tmp, 12U * sizeof (uint64_t)););
   uint64_t tmp0[12U] = { 0U };
-  uint32_t i0 = (uint32_t)255U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, i0, (uint32_t)5U);
+  uint32_t i0 = 255U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
-  const
-  uint64_t
-  *a_bits_l = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)12U;
-  memcpy(res, (uint64_t *)a_bits_l, (uint32_t)12U * sizeof (uint64_t));
-  uint32_t i1 = (uint32_t)255U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, i1, (uint32_t)5U);
+  const uint64_t *a_bits_l = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 12U;
+  memcpy(res, (uint64_t *)a_bits_l, 12U * sizeof (uint64_t));
+  uint32_t i1 = 255U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
-  const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)12U;
-  memcpy(tmp0, (uint64_t *)a_bits_l0, (uint32_t)12U * sizeof (uint64_t));
+  const uint64_t *a_bits_l0 = table2 + bits_l320 * 12U;
+  memcpy(tmp0, (uint64_t *)a_bits_l0, 12U * sizeof (uint64_t));
   point_add(res, res, tmp0);
   uint64_t tmp1[12U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++)
+  for (uint32_t i = 0U; i < 51U; i++)
   {
-    KRML_MAYBE_FOR5(i2, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, point_double(res, res););
-    uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, point_double(res, res););
+    uint32_t k = 255U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, k, 5U);
     uint32_t bits_l321 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)12U;
-    memcpy(tmp1, (uint64_t *)a_bits_l1, (uint32_t)12U * sizeof (uint64_t));
+    const uint64_t *a_bits_l1 = table2 + bits_l321 * 12U;
+    memcpy(tmp1, (uint64_t *)a_bits_l1, 12U * sizeof (uint64_t));
     point_add(res, res, tmp1);
-    uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, k0, (uint32_t)5U);
+    uint32_t k0 = 255U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, k0, 5U);
     uint32_t bits_l322 = (uint32_t)bits_l0;
     const
     uint64_t
-    *a_bits_l2 = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)12U;
-    memcpy(tmp1, (uint64_t *)a_bits_l2, (uint32_t)12U * sizeof (uint64_t));
+    *a_bits_l2 = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 12U;
+    memcpy(tmp1, (uint64_t *)a_bits_l2, 12U * sizeof (uint64_t));
     point_add(res, res, tmp1);
   }
 }
@@ -1132,7 +1112,7 @@ static inline uint64_t bn_is_lt_order_mask4(uint64_t *f)
   uint64_t tmp[4U] = { 0U };
   make_order(tmp);
   uint64_t c = bn_sub4(tmp, f, tmp);
-  return (uint64_t)0U - c;
+  return 0ULL - c;
 }
 
 static inline uint64_t bn_is_lt_order_and_gt_zero_mask4(uint64_t *f)
@@ -1161,61 +1141,61 @@ static inline void qmont_reduction(uint64_t *res, uint64_t *x)
 {
   uint64_t n[4U] = { 0U };
   make_order(n);
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t qj = (uint64_t)0xccd1c8aaee00bc4fU * x[i0];
+    0U,
+    4U,
+    1U,
+    uint64_t qj = 0xccd1c8aaee00bc4fULL * x[i0];
     uint64_t *res_j0 = x + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
     }
     uint64_t r = c;
     uint64_t c1 = r;
-    uint64_t *resb = x + (uint32_t)4U + i0;
-    uint64_t res_j = x[(uint32_t)4U + i0];
+    uint64_t *resb = x + 4U + i0;
+    uint64_t res_j = x[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););
-  memcpy(res, x + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, x + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x1;);
@@ -1224,7 +1204,7 @@ static inline void qmont_reduction(uint64_t *res, uint64_t *x)
 static inline void from_qmont(uint64_t *res, uint64_t *x)
 {
   uint64_t tmp[8U] = { 0U };
-  memcpy(tmp, x, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, x, 4U * sizeof (uint64_t));
   qmont_reduction(res, tmp);
 }
 
@@ -1246,18 +1226,18 @@ bool Hacl_Impl_P256_DH_ecp256dh_i(uint8_t *public_key, uint8_t *private_key)
 {
   uint64_t tmp[16U] = { 0U };
   uint64_t *sk = tmp;
-  uint64_t *pk = tmp + (uint32_t)4U;
+  uint64_t *pk = tmp + 4U;
   bn_from_bytes_be4(sk, private_key);
   uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(sk);
   uint64_t oneq[4U] = { 0U };
-  oneq[0U] = (uint64_t)1U;
-  oneq[1U] = (uint64_t)0U;
-  oneq[2U] = (uint64_t)0U;
-  oneq[3U] = (uint64_t)0U;
+  oneq[0U] = 1ULL;
+  oneq[1U] = 0ULL;
+  oneq[2U] = 0ULL;
+  oneq[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -1265,7 +1245,7 @@ bool Hacl_Impl_P256_DH_ecp256dh_i(uint8_t *public_key, uint8_t *private_key)
   uint64_t is_sk_valid = is_b_valid;
   point_mul_g(pk, sk);
   point_store(public_key, pk);
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 bool
@@ -1277,19 +1257,19 @@ Hacl_Impl_P256_DH_ecp256dh_r(
 {
   uint64_t tmp[16U] = { 0U };
   uint64_t *sk = tmp;
-  uint64_t *pk = tmp + (uint32_t)4U;
+  uint64_t *pk = tmp + 4U;
   bool is_pk_valid = load_point_vartime(pk, their_pubkey);
   bn_from_bytes_be4(sk, private_key);
   uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(sk);
   uint64_t oneq[4U] = { 0U };
-  oneq[0U] = (uint64_t)1U;
-  oneq[1U] = (uint64_t)0U;
-  oneq[2U] = (uint64_t)0U;
-  oneq[3U] = (uint64_t)0U;
+  oneq[0U] = 1ULL;
+  oneq[1U] = 0ULL;
+  oneq[2U] = 0ULL;
+  oneq[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -1301,27 +1281,27 @@ Hacl_Impl_P256_DH_ecp256dh_r(
     point_mul(ss_proj, sk, pk);
     point_store(shared_secret, ss_proj);
   }
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU && is_pk_valid;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL && is_pk_valid;
 }
 
 static inline void qinv(uint64_t *res, uint64_t *r)
 {
   uint64_t tmp[28U] = { 0U };
   uint64_t *x6 = tmp;
-  uint64_t *x_11 = tmp + (uint32_t)4U;
-  uint64_t *x_101 = tmp + (uint32_t)8U;
-  uint64_t *x_111 = tmp + (uint32_t)12U;
-  uint64_t *x_1111 = tmp + (uint32_t)16U;
-  uint64_t *x_10101 = tmp + (uint32_t)20U;
-  uint64_t *x_101111 = tmp + (uint32_t)24U;
-  memcpy(x6, r, (uint32_t)4U * sizeof (uint64_t));
+  uint64_t *x_11 = tmp + 4U;
+  uint64_t *x_101 = tmp + 8U;
+  uint64_t *x_111 = tmp + 12U;
+  uint64_t *x_1111 = tmp + 16U;
+  uint64_t *x_10101 = tmp + 20U;
+  uint64_t *x_101111 = tmp + 24U;
+  memcpy(x6, r, 4U * sizeof (uint64_t));
   {
     qsqr(x6, x6);
   }
   qmul(x_11, x6, r);
   qmul(x_101, x6, x_11);
   qmul(x_111, x6, x_101);
-  memcpy(x6, x_101, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x6, x_101, 4U * sizeof (uint64_t));
   {
     qsqr(x6, x6);
   }
@@ -1330,86 +1310,86 @@ static inline void qinv(uint64_t *res, uint64_t *r)
     qsqr(x6, x6);
   }
   qmul(x_10101, x6, r);
-  memcpy(x6, x_10101, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x6, x_10101, 4U * sizeof (uint64_t));
   {
     qsqr(x6, x6);
   }
   qmul(x_101111, x_101, x6);
   qmul(x6, x_10101, x6);
   uint64_t tmp1[4U] = { 0U };
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(x6, x6););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(x6, x6););
   qmul(x6, x6, x_11);
-  memcpy(tmp1, x6, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  memcpy(tmp1, x6, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x6);
-  memcpy(x6, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR16(i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, qsqr(x6, x6););
+  memcpy(x6, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, qsqr(x6, x6););
   qmul(x6, x6, tmp1);
-  memcpy(tmp1, x6, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memcpy(tmp1, x6, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     qsqr(tmp1, tmp1);
   }
   qmul(tmp1, tmp1, x6);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     qsqr(tmp1, tmp1);
   }
   qmul(tmp1, tmp1, x6);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_10101);
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR9(i, (uint32_t)0U, (uint32_t)9U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR9(i, 0U, 9U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101111);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, r);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, r);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR10(i, (uint32_t)0U, (uint32_t)10U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR10(i, 0U, 10U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101111);
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, r);
-  KRML_MAYBE_FOR7(i, (uint32_t)0U, (uint32_t)7U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR7(i, 0U, 7U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_10101);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  memcpy(x6, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  memcpy(res, x6, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x6, tmp1, 4U * sizeof (uint64_t));
+  memcpy(res, x6, 4U * sizeof (uint64_t));
 }
 
 static inline void qmul_mont(uint64_t *sinv, uint64_t *b, uint64_t *res)
@@ -1429,20 +1409,16 @@ ecdsa_verify_msg_as_qelem(
 {
   uint64_t tmp[28U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *r_q = tmp + (uint32_t)12U;
-  uint64_t *s_q = tmp + (uint32_t)16U;
-  uint64_t *u1 = tmp + (uint32_t)20U;
-  uint64_t *u2 = tmp + (uint32_t)24U;
+  uint64_t *r_q = tmp + 12U;
+  uint64_t *s_q = tmp + 16U;
+  uint64_t *u1 = tmp + 20U;
+  uint64_t *u2 = tmp + 24U;
   bool is_pk_valid = load_point_vartime(pk, public_key);
   bn_from_bytes_be4(r_q, signature_r);
   bn_from_bytes_be4(s_q, signature_s);
   uint64_t is_r_valid = bn_is_lt_order_and_gt_zero_mask4(r_q);
   uint64_t is_s_valid = bn_is_lt_order_and_gt_zero_mask4(s_q);
-  bool
-  is_rs_valid =
-    is_r_valid
-    == (uint64_t)0xFFFFFFFFFFFFFFFFU
-    && is_s_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool is_rs_valid = is_r_valid == 0xFFFFFFFFFFFFFFFFULL && is_s_valid == 0xFFFFFFFFFFFFFFFFULL;
   if (!(is_pk_valid && is_rs_valid))
   {
     return false;
@@ -1474,20 +1450,20 @@ ecdsa_sign_msg_as_qelem(
 {
   uint64_t rsdk_q[16U] = { 0U };
   uint64_t *r_q = rsdk_q;
-  uint64_t *s_q = rsdk_q + (uint32_t)4U;
-  uint64_t *d_a = rsdk_q + (uint32_t)8U;
-  uint64_t *k_q = rsdk_q + (uint32_t)12U;
+  uint64_t *s_q = rsdk_q + 4U;
+  uint64_t *d_a = rsdk_q + 8U;
+  uint64_t *k_q = rsdk_q + 12U;
   bn_from_bytes_be4(d_a, private_key);
   uint64_t is_b_valid0 = bn_is_lt_order_and_gt_zero_mask4(d_a);
   uint64_t oneq0[4U] = { 0U };
-  oneq0[0U] = (uint64_t)1U;
-  oneq0[1U] = (uint64_t)0U;
-  oneq0[2U] = (uint64_t)0U;
-  oneq0[3U] = (uint64_t)0U;
+  oneq0[0U] = 1ULL;
+  oneq0[1U] = 0ULL;
+  oneq0[2U] = 0ULL;
+  oneq0[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = d_a;
     uint64_t uu____0 = oneq0[i];
     uint64_t x = uu____0 ^ (is_b_valid0 & (d_a[i] ^ uu____0));
@@ -1496,14 +1472,14 @@ ecdsa_sign_msg_as_qelem(
   bn_from_bytes_be4(k_q, nonce);
   uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(k_q);
   uint64_t oneq[4U] = { 0U };
-  oneq[0U] = (uint64_t)1U;
-  oneq[1U] = (uint64_t)0U;
-  oneq[2U] = (uint64_t)0U;
-  oneq[3U] = (uint64_t)0U;
+  oneq[0U] = 1ULL;
+  oneq[1U] = 0ULL;
+  oneq[2U] = 0ULL;
+  oneq[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = k_q;
     uint64_t uu____1 = oneq[i];
     uint64_t x = uu____1 ^ (is_b_valid & (k_q[i] ^ uu____1));
@@ -1524,7 +1500,7 @@ ecdsa_sign_msg_as_qelem(
   uint64_t is_r_zero = bn_is_zero_mask4(r_q);
   uint64_t is_s_zero = bn_is_zero_mask4(s_q);
   uint64_t m = are_sk_nonce_valid & (~is_r_zero & ~is_s_zero);
-  bool res = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool res = m == 0xFFFFFFFFFFFFFFFFULL;
   return res;
 }
 
@@ -1572,7 +1548,7 @@ Hacl_P256_ecdsa_sign_p256_sha2(
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
   Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1605,7 +1581,7 @@ Hacl_P256_ecdsa_sign_p256_sha384(
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[48U] = { 0U };
   Hacl_Streaming_SHA2_hash_384(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1638,7 +1614,7 @@ Hacl_P256_ecdsa_sign_p256_sha512(
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[64U] = { 0U };
   Hacl_Streaming_SHA2_hash_512(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1680,8 +1656,8 @@ Hacl_P256_ecdsa_sign_p256_without_hash(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
-  memcpy(mHash, msg, (uint32_t)32U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(msg_len);
+  memcpy(mHash, msg, 32U * sizeof (uint8_t));
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1717,7 +1693,7 @@ Hacl_P256_ecdsa_verif_p256_sha2(
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
   Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1748,7 +1724,7 @@ Hacl_P256_ecdsa_verif_p256_sha384(
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[48U] = { 0U };
   Hacl_Streaming_SHA2_hash_384(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1779,7 +1755,7 @@ Hacl_P256_ecdsa_verif_p256_sha512(
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[64U] = { 0U };
   Hacl_Streaming_SHA2_hash_512(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1814,8 +1790,8 @@ Hacl_P256_ecdsa_verif_without_hash(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
-  memcpy(mHash, msg, (uint32_t)32U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(msg_len);
+  memcpy(mHash, msg, 32U * sizeof (uint8_t));
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1864,7 +1840,7 @@ bool Hacl_P256_validate_private_key(uint8_t *private_key)
   uint64_t bn_sk[4U] = { 0U };
   bn_from_bytes_be4(bn_sk, private_key);
   uint64_t res = bn_is_lt_order_and_gt_zero_mask4(bn_sk);
-  return res == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return res == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /*******************************************************************************
@@ -1893,11 +1869,11 @@ Convert a public key from uncompressed to its raw form.
 bool Hacl_P256_uncompressed_to_raw(uint8_t *pk, uint8_t *pk_raw)
 {
   uint8_t pk0 = pk[0U];
-  if (pk0 != (uint8_t)0x04U)
+  if (pk0 != 0x04U)
   {
     return false;
   }
-  memcpy(pk_raw, pk + (uint32_t)1U, (uint32_t)64U * sizeof (uint8_t));
+  memcpy(pk_raw, pk + 1U, 64U * sizeof (uint8_t));
   return true;
 }
 
@@ -1915,12 +1891,12 @@ bool Hacl_P256_compressed_to_raw(uint8_t *pk, uint8_t *pk_raw)
 {
   uint64_t xa[4U] = { 0U };
   uint64_t ya[4U] = { 0U };
-  uint8_t *pk_xb = pk + (uint32_t)1U;
+  uint8_t *pk_xb = pk + 1U;
   bool b = aff_point_decompress_vartime(xa, ya, pk);
   if (b)
   {
-    memcpy(pk_raw, pk_xb, (uint32_t)32U * sizeof (uint8_t));
-    bn_to_bytes_be4(pk_raw + (uint32_t)32U, ya);
+    memcpy(pk_raw, pk_xb, 32U * sizeof (uint8_t));
+    bn_to_bytes_be4(pk_raw + 32U, ya);
   }
   return b;
 }
@@ -1935,8 +1911,8 @@ Convert a public key from raw to its uncompressed form.
 */
 void Hacl_P256_raw_to_uncompressed(uint8_t *pk_raw, uint8_t *pk)
 {
-  pk[0U] = (uint8_t)0x04U;
-  memcpy(pk + (uint32_t)1U, pk_raw, (uint32_t)64U * sizeof (uint8_t));
+  pk[0U] = 0x04U;
+  memcpy(pk + 1U, pk_raw, 64U * sizeof (uint8_t));
 }
 
 /**
@@ -1950,12 +1926,12 @@ Convert a public key from raw to its compressed form.
 void Hacl_P256_raw_to_compressed(uint8_t *pk_raw, uint8_t *pk)
 {
   uint8_t *pk_x = pk_raw;
-  uint8_t *pk_y = pk_raw + (uint32_t)32U;
+  uint8_t *pk_y = pk_raw + 32U;
   uint64_t bn_f[4U] = { 0U };
   bn_from_bytes_be4(bn_f, pk_y);
-  uint64_t is_odd_f = bn_f[0U] & (uint64_t)1U;
-  pk[0U] = (uint8_t)is_odd_f + (uint8_t)0x02U;
-  memcpy(pk + (uint32_t)1U, pk_x, (uint32_t)32U * sizeof (uint8_t));
+  uint64_t is_odd_f = bn_f[0U] & 1ULL;
+  pk[0U] = (uint32_t)(uint8_t)is_odd_f + 0x02U;
+  memcpy(pk + 1U, pk_x, 32U * sizeof (uint8_t));
 }
 
 
diff --git a/src/msvc/Hacl_Poly1305_128.c b/src/msvc/Hacl_Poly1305_128.c
index f400fe82..ad1d8639 100644
--- a/src/msvc/Hacl_Poly1305_128.c
+++ b/src/msvc/Hacl_Poly1305_128.c
@@ -30,33 +30,28 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
   Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(b);
-  Lib_IntVector_Intrinsics_vec128
-  b2 = Lib_IntVector_Intrinsics_vec128_load64_le(b + (uint32_t)16U);
+  Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load64_le(b + 16U);
   Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
   Lib_IntVector_Intrinsics_vec128 hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
   Lib_IntVector_Intrinsics_vec128
   f00 =
     Lib_IntVector_Intrinsics_vec128_and(lo,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f10 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 40U);
   Lib_IntVector_Intrinsics_vec128 f02 = f00;
   Lib_IntVector_Intrinsics_vec128 f12 = f10;
   Lib_IntVector_Intrinsics_vec128 f22 = f20;
@@ -67,7 +62,7 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
   e[2U] = f22;
   e[3U] = f32;
   e[4U] = f42;
-  uint64_t b10 = (uint64_t)0x1000000U;
+  uint64_t b10 = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b10);
   Lib_IntVector_Intrinsics_vec128 f43 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec128_or(f43, mask);
@@ -81,16 +76,11 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
   Lib_IntVector_Intrinsics_vec128 e2 = e[2U];
   Lib_IntVector_Intrinsics_vec128 e3 = e[3U];
   Lib_IntVector_Intrinsics_vec128 e4 = e[4U];
-  Lib_IntVector_Intrinsics_vec128
-  f0 = Lib_IntVector_Intrinsics_vec128_insert64(acc0, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f1 = Lib_IntVector_Intrinsics_vec128_insert64(acc1, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f2 = Lib_IntVector_Intrinsics_vec128_insert64(acc2, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f3 = Lib_IntVector_Intrinsics_vec128_insert64(acc3, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f4 = Lib_IntVector_Intrinsics_vec128_insert64(acc4, (uint64_t)0U, (uint32_t)1U);
+  Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_insert64(acc0, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_insert64(acc1, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f2 = Lib_IntVector_Intrinsics_vec128_insert64(acc2, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f3 = Lib_IntVector_Intrinsics_vec128_insert64(acc3, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f4 = Lib_IntVector_Intrinsics_vec128_insert64(acc4, 0ULL, 1U);
   Lib_IntVector_Intrinsics_vec128 f01 = Lib_IntVector_Intrinsics_vec128_add64(f0, e0);
   Lib_IntVector_Intrinsics_vec128 f11 = Lib_IntVector_Intrinsics_vec128_add64(f1, e1);
   Lib_IntVector_Intrinsics_vec128 f21 = Lib_IntVector_Intrinsics_vec128_add64(f2, e2);
@@ -115,7 +105,7 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
 )
 {
   Lib_IntVector_Intrinsics_vec128 *r = p;
-  Lib_IntVector_Intrinsics_vec128 *r2 = p + (uint32_t)10U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = p + 10U;
   Lib_IntVector_Intrinsics_vec128 a0 = out[0U];
   Lib_IntVector_Intrinsics_vec128 a1 = out[1U];
   Lib_IntVector_Intrinsics_vec128 a2 = out[2U];
@@ -141,14 +131,10 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   r231 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r23, r13);
   Lib_IntVector_Intrinsics_vec128
   r241 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r24, r14);
-  Lib_IntVector_Intrinsics_vec128
-  r251 = Lib_IntVector_Intrinsics_vec128_smul64(r211, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec128
-  r252 = Lib_IntVector_Intrinsics_vec128_smul64(r221, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec128
-  r253 = Lib_IntVector_Intrinsics_vec128_smul64(r231, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec128
-  r254 = Lib_IntVector_Intrinsics_vec128_smul64(r241, (uint64_t)5U);
+  Lib_IntVector_Intrinsics_vec128 r251 = Lib_IntVector_Intrinsics_vec128_smul64(r211, 5ULL);
+  Lib_IntVector_Intrinsics_vec128 r252 = Lib_IntVector_Intrinsics_vec128_smul64(r221, 5ULL);
+  Lib_IntVector_Intrinsics_vec128 r253 = Lib_IntVector_Intrinsics_vec128_smul64(r231, 5ULL);
+  Lib_IntVector_Intrinsics_vec128 r254 = Lib_IntVector_Intrinsics_vec128_smul64(r241, 5ULL);
   Lib_IntVector_Intrinsics_vec128 a01 = Lib_IntVector_Intrinsics_vec128_mul64(r201, a0);
   Lib_IntVector_Intrinsics_vec128 a11 = Lib_IntVector_Intrinsics_vec128_mul64(r211, a0);
   Lib_IntVector_Intrinsics_vec128 a21 = Lib_IntVector_Intrinsics_vec128_mul64(r221, a0);
@@ -239,37 +225,28 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   Lib_IntVector_Intrinsics_vec128 t2 = a25;
   Lib_IntVector_Intrinsics_vec128 t3 = a35;
   Lib_IntVector_Intrinsics_vec128 t4 = a45;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -302,41 +279,36 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   Lib_IntVector_Intrinsics_vec128
   tmp0 =
     Lib_IntVector_Intrinsics_vec128_and(l,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(o11, c0);
   Lib_IntVector_Intrinsics_vec128
   tmp1 =
     Lib_IntVector_Intrinsics_vec128_and(l0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(o21, c1);
   Lib_IntVector_Intrinsics_vec128
   tmp2 =
     Lib_IntVector_Intrinsics_vec128_and(l1,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(o31, c2);
   Lib_IntVector_Intrinsics_vec128
   tmp3 =
     Lib_IntVector_Intrinsics_vec128_and(l2,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(o41, c3);
   Lib_IntVector_Intrinsics_vec128
   tmp4 =
     Lib_IntVector_Intrinsics_vec128_and(l3,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec128
   o00 =
     Lib_IntVector_Intrinsics_vec128_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec128_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec128 o1 = tmp1;
   Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
   Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
@@ -351,7 +323,7 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
 void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key)
 {
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   uint8_t *kr = key;
   acc[0U] = Lib_IntVector_Intrinsics_vec128_zero;
   acc[1U] = Lib_IntVector_Intrinsics_vec128_zero;
@@ -360,41 +332,38 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   acc[4U] = Lib_IntVector_Intrinsics_vec128_zero;
   uint64_t u0 = load64_le(kr);
   uint64_t lo = u0;
-  uint64_t u = load64_le(kr + (uint32_t)8U);
+  uint64_t u = load64_le(kr + 8U);
   uint64_t hi = u;
-  uint64_t mask0 = (uint64_t)0x0ffffffc0fffffffU;
-  uint64_t mask1 = (uint64_t)0x0ffffffc0ffffffcU;
+  uint64_t mask0 = 0x0ffffffc0fffffffULL;
+  uint64_t mask1 = 0x0ffffffc0ffffffcULL;
   uint64_t lo1 = lo & mask0;
   uint64_t hi1 = hi & mask1;
   Lib_IntVector_Intrinsics_vec128 *r = pre;
-  Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec128 *rn = pre + (uint32_t)10U;
-  Lib_IntVector_Intrinsics_vec128 *rn_5 = pre + (uint32_t)15U;
+  Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
+  Lib_IntVector_Intrinsics_vec128 *rn = pre + 10U;
+  Lib_IntVector_Intrinsics_vec128 *rn_5 = pre + 15U;
   Lib_IntVector_Intrinsics_vec128 r_vec0 = Lib_IntVector_Intrinsics_vec128_load64(lo1);
   Lib_IntVector_Intrinsics_vec128 r_vec1 = Lib_IntVector_Intrinsics_vec128_load64(hi1);
   Lib_IntVector_Intrinsics_vec128
   f00 =
     Lib_IntVector_Intrinsics_vec128_and(r_vec0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f15 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(r_vec1,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, (uint32_t)40U);
+  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, 40U);
   Lib_IntVector_Intrinsics_vec128 f0 = f00;
   Lib_IntVector_Intrinsics_vec128 f1 = f15;
   Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -410,11 +379,11 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   Lib_IntVector_Intrinsics_vec128 f220 = r[2U];
   Lib_IntVector_Intrinsics_vec128 f230 = r[3U];
   Lib_IntVector_Intrinsics_vec128 f240 = r[4U];
-  r5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f200, (uint64_t)5U);
-  r5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f210, (uint64_t)5U);
-  r5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f220, (uint64_t)5U);
-  r5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f230, (uint64_t)5U);
-  r5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f240, (uint64_t)5U);
+  r5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f200, 5ULL);
+  r5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f210, 5ULL);
+  r5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f220, 5ULL);
+  r5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f230, 5ULL);
+  r5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f240, 5ULL);
   Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -511,37 +480,28 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   Lib_IntVector_Intrinsics_vec128 t2 = a24;
   Lib_IntVector_Intrinsics_vec128 t3 = a34;
   Lib_IntVector_Intrinsics_vec128 t4 = a44;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -559,47 +519,43 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   Lib_IntVector_Intrinsics_vec128 f22 = rn[2U];
   Lib_IntVector_Intrinsics_vec128 f23 = rn[3U];
   Lib_IntVector_Intrinsics_vec128 f24 = rn[4U];
-  rn_5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f201, (uint64_t)5U);
-  rn_5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f21, (uint64_t)5U);
-  rn_5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f22, (uint64_t)5U);
-  rn_5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f23, (uint64_t)5U);
-  rn_5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f24, (uint64_t)5U);
+  rn_5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f201, 5ULL);
+  rn_5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f21, 5ULL);
+  rn_5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f22, 5ULL);
+  rn_5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f23, 5ULL);
+  rn_5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f24, 5ULL);
 }
 
 void Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *text)
 {
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
   uint64_t u0 = load64_le(text);
   uint64_t lo = u0;
-  uint64_t u = load64_le(text + (uint32_t)8U);
+  uint64_t u = load64_le(text + 8U);
   uint64_t hi = u;
   Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
   Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
   Lib_IntVector_Intrinsics_vec128
   f010 =
     Lib_IntVector_Intrinsics_vec128_and(f0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f110 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
   Lib_IntVector_Intrinsics_vec128 f01 = f010;
   Lib_IntVector_Intrinsics_vec128 f111 = f110;
   Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -610,12 +566,12 @@ void Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, ui
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
   Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
   Lib_IntVector_Intrinsics_vec128 *r = pre;
-  Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
   Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -730,37 +686,28 @@ void Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, ui
   Lib_IntVector_Intrinsics_vec128 t2 = a26;
   Lib_IntVector_Intrinsics_vec128 t3 = a36;
   Lib_IntVector_Intrinsics_vec128 t4 = a46;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -782,52 +729,47 @@ Hacl_Poly1305_128_poly1305_update(
   uint8_t *text
 )
 {
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  uint32_t sz_block = (uint32_t)32U;
+  uint32_t sz_block = 32U;
   uint32_t len0 = len / sz_block * sz_block;
   uint8_t *t0 = text;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)32U;
+    uint32_t bs = 32U;
     uint8_t *text0 = t0;
     Hacl_Impl_Poly1305_Field32xN_128_load_acc2(acc, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t0 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
       Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(block);
-      Lib_IntVector_Intrinsics_vec128
-      b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + (uint32_t)16U);
+      Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + 16U);
       Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       f00 =
         Lib_IntVector_Intrinsics_vec128_and(lo,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f15 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)26U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 26U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f25 =
-        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)52U),
+        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 52U),
           Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi,
-              Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-            (uint32_t)12U));
+              Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+            12U));
       Lib_IntVector_Intrinsics_vec128
       f30 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi,
-            (uint32_t)14U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-      Lib_IntVector_Intrinsics_vec128
-      f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U);
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 14U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+      Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 40U);
       Lib_IntVector_Intrinsics_vec128 f0 = f00;
       Lib_IntVector_Intrinsics_vec128 f1 = f15;
       Lib_IntVector_Intrinsics_vec128 f2 = f25;
@@ -838,12 +780,12 @@ Hacl_Poly1305_128_poly1305_update(
       e[2U] = f2;
       e[3U] = f3;
       e[4U] = f41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
       Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec128 *rn = pre + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec128 *rn5 = pre + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec128 *rn = pre + 10U;
+      Lib_IntVector_Intrinsics_vec128 *rn5 = pre + 15U;
       Lib_IntVector_Intrinsics_vec128 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec128 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec128 r2 = rn[2U];
@@ -948,37 +890,28 @@ Hacl_Poly1305_128_poly1305_update(
       Lib_IntVector_Intrinsics_vec128 t2 = a24;
       Lib_IntVector_Intrinsics_vec128 t3 = a34;
       Lib_IntVector_Intrinsics_vec128 t4 = a44;
-      Lib_IntVector_Intrinsics_vec128
-      mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec128
-      z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec128
-      z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
       Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec128
-      z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec128
-      z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec128 o00 = x02;
@@ -1016,41 +949,37 @@ Hacl_Poly1305_128_poly1305_update(
   }
   uint32_t len1 = len - len0;
   uint8_t *t1 = text + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t1 + i * (uint32_t)16U;
+    uint8_t *block = t1 + i * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -1061,12 +990,12 @@ Hacl_Poly1305_128_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
     Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
     Lib_IntVector_Intrinsics_vec128 *r = pre;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -1181,37 +1110,28 @@ Hacl_Poly1305_128_poly1305_update(
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -1225,41 +1145,37 @@ Hacl_Poly1305_128_poly1305_update(
     acc[3U] = o3;
     acc[4U] = o4;
   }
-  if (rem > (uint32_t)0U)
+  if (rem > 0U)
   {
-    uint8_t *last = t1 + nb * (uint32_t)16U;
+    uint8_t *last = t1 + nb * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -1270,12 +1186,12 @@ Hacl_Poly1305_128_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem * 8U % 26U;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
-    Lib_IntVector_Intrinsics_vec128 fi = e[rem * (uint32_t)8U / (uint32_t)26U];
-    e[rem * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec128 fi = e[rem * 8U / 26U];
+    e[rem * 8U / 26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
     Lib_IntVector_Intrinsics_vec128 *r = pre;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -1390,37 +1306,28 @@ Hacl_Poly1305_128_poly1305_update(
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -1445,7 +1352,7 @@ Hacl_Poly1305_128_poly1305_finish(
 )
 {
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  uint8_t *ks = key + (uint32_t)16U;
+  uint8_t *ks = key + 16U;
   Lib_IntVector_Intrinsics_vec128 f0 = acc[0U];
   Lib_IntVector_Intrinsics_vec128 f13 = acc[1U];
   Lib_IntVector_Intrinsics_vec128 f23 = acc[2U];
@@ -1456,41 +1363,36 @@ Hacl_Poly1305_128_poly1305_finish(
   Lib_IntVector_Intrinsics_vec128
   tmp00 =
     Lib_IntVector_Intrinsics_vec128_and(l0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(f13, c00);
   Lib_IntVector_Intrinsics_vec128
   tmp10 =
     Lib_IntVector_Intrinsics_vec128_and(l1,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(f23, c10);
   Lib_IntVector_Intrinsics_vec128
   tmp20 =
     Lib_IntVector_Intrinsics_vec128_and(l2,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(f33, c20);
   Lib_IntVector_Intrinsics_vec128
   tmp30 =
     Lib_IntVector_Intrinsics_vec128_and(l3,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec128 l4 = Lib_IntVector_Intrinsics_vec128_add64(f40, c30);
   Lib_IntVector_Intrinsics_vec128
   tmp40 =
     Lib_IntVector_Intrinsics_vec128_and(l4,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, 26U);
   Lib_IntVector_Intrinsics_vec128
   f010 =
     Lib_IntVector_Intrinsics_vec128_add64(tmp00,
-      Lib_IntVector_Intrinsics_vec128_smul64(c40, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec128_smul64(c40, 5ULL));
   Lib_IntVector_Intrinsics_vec128 f110 = tmp10;
   Lib_IntVector_Intrinsics_vec128 f210 = tmp20;
   Lib_IntVector_Intrinsics_vec128 f310 = tmp30;
@@ -1500,49 +1402,42 @@ Hacl_Poly1305_128_poly1305_finish(
   Lib_IntVector_Intrinsics_vec128
   tmp0 =
     Lib_IntVector_Intrinsics_vec128_and(l,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec128 l5 = Lib_IntVector_Intrinsics_vec128_add64(f110, c0);
   Lib_IntVector_Intrinsics_vec128
   tmp1 =
     Lib_IntVector_Intrinsics_vec128_and(l5,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, 26U);
   Lib_IntVector_Intrinsics_vec128 l6 = Lib_IntVector_Intrinsics_vec128_add64(f210, c1);
   Lib_IntVector_Intrinsics_vec128
   tmp2 =
     Lib_IntVector_Intrinsics_vec128_and(l6,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, 26U);
   Lib_IntVector_Intrinsics_vec128 l7 = Lib_IntVector_Intrinsics_vec128_add64(f310, c2);
   Lib_IntVector_Intrinsics_vec128
   tmp3 =
     Lib_IntVector_Intrinsics_vec128_and(l7,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, 26U);
   Lib_IntVector_Intrinsics_vec128 l8 = Lib_IntVector_Intrinsics_vec128_add64(f410, c3);
   Lib_IntVector_Intrinsics_vec128
   tmp4 =
     Lib_IntVector_Intrinsics_vec128_and(l8,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l8, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l8, 26U);
   Lib_IntVector_Intrinsics_vec128
   f02 =
     Lib_IntVector_Intrinsics_vec128_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec128_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec128 f12 = tmp1;
   Lib_IntVector_Intrinsics_vec128 f22 = tmp2;
   Lib_IntVector_Intrinsics_vec128 f32 = tmp3;
   Lib_IntVector_Intrinsics_vec128 f42 = tmp4;
-  Lib_IntVector_Intrinsics_vec128
-  mh = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  ml = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffffbU);
+  Lib_IntVector_Intrinsics_vec128 mh = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 ml = Lib_IntVector_Intrinsics_vec128_load64(0x3fffffbULL);
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_eq64(f42, mh);
   Lib_IntVector_Intrinsics_vec128
   mask1 =
@@ -1582,29 +1477,29 @@ Hacl_Poly1305_128_poly1305_finish(
   Lib_IntVector_Intrinsics_vec128 f2 = acc[2U];
   Lib_IntVector_Intrinsics_vec128 f3 = acc[3U];
   Lib_IntVector_Intrinsics_vec128 f4 = acc[4U];
-  uint64_t f01 = Lib_IntVector_Intrinsics_vec128_extract64(f00, (uint32_t)0U);
-  uint64_t f112 = Lib_IntVector_Intrinsics_vec128_extract64(f1, (uint32_t)0U);
-  uint64_t f212 = Lib_IntVector_Intrinsics_vec128_extract64(f2, (uint32_t)0U);
-  uint64_t f312 = Lib_IntVector_Intrinsics_vec128_extract64(f3, (uint32_t)0U);
-  uint64_t f41 = Lib_IntVector_Intrinsics_vec128_extract64(f4, (uint32_t)0U);
-  uint64_t lo = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U;
-  uint64_t hi = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U;
+  uint64_t f01 = Lib_IntVector_Intrinsics_vec128_extract64(f00, 0U);
+  uint64_t f112 = Lib_IntVector_Intrinsics_vec128_extract64(f1, 0U);
+  uint64_t f212 = Lib_IntVector_Intrinsics_vec128_extract64(f2, 0U);
+  uint64_t f312 = Lib_IntVector_Intrinsics_vec128_extract64(f3, 0U);
+  uint64_t f41 = Lib_IntVector_Intrinsics_vec128_extract64(f4, 0U);
+  uint64_t lo = (f01 | f112 << 26U) | f212 << 52U;
+  uint64_t hi = (f212 >> 12U | f312 << 14U) | f41 << 40U;
   uint64_t f10 = lo;
   uint64_t f11 = hi;
   uint64_t u0 = load64_le(ks);
   uint64_t lo0 = u0;
-  uint64_t u = load64_le(ks + (uint32_t)8U);
+  uint64_t u = load64_le(ks + 8U);
   uint64_t hi0 = u;
   uint64_t f20 = lo0;
   uint64_t f21 = hi0;
   uint64_t r0 = f10 + f20;
   uint64_t r1 = f11 + f21;
-  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U;
+  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> 63U;
   uint64_t r11 = r1 + c;
   uint64_t f30 = r0;
   uint64_t f31 = r11;
   store64_le(tag, f30);
-  store64_le(tag + (uint32_t)8U, f31);
+  store64_le(tag + 8U, f31);
 }
 
 void Hacl_Poly1305_128_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
diff --git a/src/msvc/Hacl_Poly1305_256.c b/src/msvc/Hacl_Poly1305_256.c
index db28cdc7..a60bc238 100644
--- a/src/msvc/Hacl_Poly1305_256.c
+++ b/src/msvc/Hacl_Poly1305_256.c
@@ -30,32 +30,24 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
   Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(b);
-  Lib_IntVector_Intrinsics_vec256
-  hi = Lib_IntVector_Intrinsics_vec256_load64_le(b + (uint32_t)32U);
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
+  Lib_IntVector_Intrinsics_vec256 hi = Lib_IntVector_Intrinsics_vec256_load64_le(b + 32U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
   Lib_IntVector_Intrinsics_vec256 m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
   Lib_IntVector_Intrinsics_vec256
   m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
-  Lib_IntVector_Intrinsics_vec256
-  m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U);
-  Lib_IntVector_Intrinsics_vec256
-  m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U);
+  Lib_IntVector_Intrinsics_vec256 m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, 48U);
+  Lib_IntVector_Intrinsics_vec256 m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, 48U);
   Lib_IntVector_Intrinsics_vec256 m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1);
   Lib_IntVector_Intrinsics_vec256 t0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1);
   Lib_IntVector_Intrinsics_vec256 t3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
-  Lib_IntVector_Intrinsics_vec256
-  t2 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)4U);
+  Lib_IntVector_Intrinsics_vec256 t2 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 4U);
   Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t2, mask26);
-  Lib_IntVector_Intrinsics_vec256
-  t1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 t1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
   Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t1, mask26);
   Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
-  Lib_IntVector_Intrinsics_vec256
-  t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)30U);
+  Lib_IntVector_Intrinsics_vec256 t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 30U);
   Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask26);
-  Lib_IntVector_Intrinsics_vec256
-  o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
+  Lib_IntVector_Intrinsics_vec256 o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, 40U);
   Lib_IntVector_Intrinsics_vec256 o0 = o5;
   Lib_IntVector_Intrinsics_vec256 o1 = o10;
   Lib_IntVector_Intrinsics_vec256 o2 = o20;
@@ -66,7 +58,7 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
   e[2U] = o2;
   e[3U] = o3;
   e[4U] = o4;
-  uint64_t b1 = (uint64_t)0x1000000U;
+  uint64_t b1 = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b1);
   Lib_IntVector_Intrinsics_vec256 f40 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec256_or(f40, mask);
@@ -88,28 +80,28 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
   Lib_IntVector_Intrinsics_vec256
   r01 =
     Lib_IntVector_Intrinsics_vec256_insert64(r0,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc0, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc0, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r11 =
     Lib_IntVector_Intrinsics_vec256_insert64(r1,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc1, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc1, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r21 =
     Lib_IntVector_Intrinsics_vec256_insert64(r2,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc2, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc2, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r31 =
     Lib_IntVector_Intrinsics_vec256_insert64(r3,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc3, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc3, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r41 =
     Lib_IntVector_Intrinsics_vec256_insert64(r4,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc4, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc4, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_add64(r01, e0);
   Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_add64(r11, e1);
   Lib_IntVector_Intrinsics_vec256 f2 = Lib_IntVector_Intrinsics_vec256_add64(r21, e2);
@@ -134,8 +126,8 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
 )
 {
   Lib_IntVector_Intrinsics_vec256 *r = p;
-  Lib_IntVector_Intrinsics_vec256 *r_5 = p + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec256 *r4 = p + (uint32_t)10U;
+  Lib_IntVector_Intrinsics_vec256 *r_5 = p + 5U;
+  Lib_IntVector_Intrinsics_vec256 *r4 = p + 10U;
   Lib_IntVector_Intrinsics_vec256 a0 = out[0U];
   Lib_IntVector_Intrinsics_vec256 a1 = out[1U];
   Lib_IntVector_Intrinsics_vec256 a2 = out[2U];
@@ -245,37 +237,30 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 t20 = a250;
   Lib_IntVector_Intrinsics_vec256 t30 = a350;
   Lib_IntVector_Intrinsics_vec256 t40 = a450;
-  Lib_IntVector_Intrinsics_vec256
-  mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, 26U);
+  Lib_IntVector_Intrinsics_vec256 z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 26U);
   Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask260);
   Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask260);
   Lib_IntVector_Intrinsics_vec256 x10 = Lib_IntVector_Intrinsics_vec256_add64(t10, z00);
   Lib_IntVector_Intrinsics_vec256 x40 = Lib_IntVector_Intrinsics_vec256_add64(t40, z10);
-  Lib_IntVector_Intrinsics_vec256
-  z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, 26U);
+  Lib_IntVector_Intrinsics_vec256 z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, 26U);
+  Lib_IntVector_Intrinsics_vec256 t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z110, t5);
   Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask260);
   Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask260);
   Lib_IntVector_Intrinsics_vec256 x20 = Lib_IntVector_Intrinsics_vec256_add64(t20, z010);
   Lib_IntVector_Intrinsics_vec256 x010 = Lib_IntVector_Intrinsics_vec256_add64(x00, z12);
+  Lib_IntVector_Intrinsics_vec256 z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, 26U);
   Lib_IntVector_Intrinsics_vec256
-  z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, (uint32_t)26U);
+  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, 26U);
   Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask260);
   Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask260);
   Lib_IntVector_Intrinsics_vec256 x310 = Lib_IntVector_Intrinsics_vec256_add64(x30, z020);
   Lib_IntVector_Intrinsics_vec256 x120 = Lib_IntVector_Intrinsics_vec256_add64(x110, z130);
   Lib_IntVector_Intrinsics_vec256
-  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, (uint32_t)26U);
+  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, 26U);
   Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask260);
   Lib_IntVector_Intrinsics_vec256 x420 = Lib_IntVector_Intrinsics_vec256_add64(x410, z030);
   Lib_IntVector_Intrinsics_vec256 r20 = x020;
@@ -373,37 +358,30 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 t21 = a251;
   Lib_IntVector_Intrinsics_vec256 t31 = a351;
   Lib_IntVector_Intrinsics_vec256 t41 = a451;
-  Lib_IntVector_Intrinsics_vec256
-  mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z04 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z14 = Lib_IntVector_Intrinsics_vec256_shift_right64(t31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask261 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z04 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+  Lib_IntVector_Intrinsics_vec256 z14 = Lib_IntVector_Intrinsics_vec256_shift_right64(t31, 26U);
   Lib_IntVector_Intrinsics_vec256 x03 = Lib_IntVector_Intrinsics_vec256_and(t01, mask261);
   Lib_IntVector_Intrinsics_vec256 x33 = Lib_IntVector_Intrinsics_vec256_and(t31, mask261);
   Lib_IntVector_Intrinsics_vec256 x13 = Lib_IntVector_Intrinsics_vec256_add64(t11, z04);
   Lib_IntVector_Intrinsics_vec256 x43 = Lib_IntVector_Intrinsics_vec256_add64(t41, z14);
-  Lib_IntVector_Intrinsics_vec256
-  z011 = Lib_IntVector_Intrinsics_vec256_shift_right64(x13, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z111 = Lib_IntVector_Intrinsics_vec256_shift_right64(x43, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t6 = Lib_IntVector_Intrinsics_vec256_shift_left64(z111, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z011 = Lib_IntVector_Intrinsics_vec256_shift_right64(x13, 26U);
+  Lib_IntVector_Intrinsics_vec256 z111 = Lib_IntVector_Intrinsics_vec256_shift_right64(x43, 26U);
+  Lib_IntVector_Intrinsics_vec256 t6 = Lib_IntVector_Intrinsics_vec256_shift_left64(z111, 2U);
   Lib_IntVector_Intrinsics_vec256 z120 = Lib_IntVector_Intrinsics_vec256_add64(z111, t6);
   Lib_IntVector_Intrinsics_vec256 x111 = Lib_IntVector_Intrinsics_vec256_and(x13, mask261);
   Lib_IntVector_Intrinsics_vec256 x411 = Lib_IntVector_Intrinsics_vec256_and(x43, mask261);
   Lib_IntVector_Intrinsics_vec256 x22 = Lib_IntVector_Intrinsics_vec256_add64(t21, z011);
   Lib_IntVector_Intrinsics_vec256 x011 = Lib_IntVector_Intrinsics_vec256_add64(x03, z120);
+  Lib_IntVector_Intrinsics_vec256 z021 = Lib_IntVector_Intrinsics_vec256_shift_right64(x22, 26U);
   Lib_IntVector_Intrinsics_vec256
-  z021 = Lib_IntVector_Intrinsics_vec256_shift_right64(x22, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z131 = Lib_IntVector_Intrinsics_vec256_shift_right64(x011, (uint32_t)26U);
+  z131 = Lib_IntVector_Intrinsics_vec256_shift_right64(x011, 26U);
   Lib_IntVector_Intrinsics_vec256 x211 = Lib_IntVector_Intrinsics_vec256_and(x22, mask261);
   Lib_IntVector_Intrinsics_vec256 x021 = Lib_IntVector_Intrinsics_vec256_and(x011, mask261);
   Lib_IntVector_Intrinsics_vec256 x311 = Lib_IntVector_Intrinsics_vec256_add64(x33, z021);
   Lib_IntVector_Intrinsics_vec256 x121 = Lib_IntVector_Intrinsics_vec256_add64(x111, z131);
   Lib_IntVector_Intrinsics_vec256
-  z031 = Lib_IntVector_Intrinsics_vec256_shift_right64(x311, (uint32_t)26U);
+  z031 = Lib_IntVector_Intrinsics_vec256_shift_right64(x311, 26U);
   Lib_IntVector_Intrinsics_vec256 x321 = Lib_IntVector_Intrinsics_vec256_and(x311, mask261);
   Lib_IntVector_Intrinsics_vec256 x421 = Lib_IntVector_Intrinsics_vec256_add64(x411, z031);
   Lib_IntVector_Intrinsics_vec256 r30 = x021;
@@ -441,14 +419,10 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   v34344 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r44, r34);
   Lib_IntVector_Intrinsics_vec256
   r12344 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v34344, v12124);
-  Lib_IntVector_Intrinsics_vec256
-  r123451 = Lib_IntVector_Intrinsics_vec256_smul64(r12341, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec256
-  r123452 = Lib_IntVector_Intrinsics_vec256_smul64(r12342, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec256
-  r123453 = Lib_IntVector_Intrinsics_vec256_smul64(r12343, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec256
-  r123454 = Lib_IntVector_Intrinsics_vec256_smul64(r12344, (uint64_t)5U);
+  Lib_IntVector_Intrinsics_vec256 r123451 = Lib_IntVector_Intrinsics_vec256_smul64(r12341, 5ULL);
+  Lib_IntVector_Intrinsics_vec256 r123452 = Lib_IntVector_Intrinsics_vec256_smul64(r12342, 5ULL);
+  Lib_IntVector_Intrinsics_vec256 r123453 = Lib_IntVector_Intrinsics_vec256_smul64(r12343, 5ULL);
+  Lib_IntVector_Intrinsics_vec256 r123454 = Lib_IntVector_Intrinsics_vec256_smul64(r12344, 5ULL);
   Lib_IntVector_Intrinsics_vec256 a01 = Lib_IntVector_Intrinsics_vec256_mul64(r12340, a0);
   Lib_IntVector_Intrinsics_vec256 a11 = Lib_IntVector_Intrinsics_vec256_mul64(r12341, a0);
   Lib_IntVector_Intrinsics_vec256 a21 = Lib_IntVector_Intrinsics_vec256_mul64(r12342, a0);
@@ -539,37 +513,28 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 t2 = a25;
   Lib_IntVector_Intrinsics_vec256 t3 = a35;
   Lib_IntVector_Intrinsics_vec256 t4 = a45;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z121 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z121);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -612,41 +577,36 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256
   tmp0 =
     Lib_IntVector_Intrinsics_vec256_and(l,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec256 l0 = Lib_IntVector_Intrinsics_vec256_add64(v21, c0);
   Lib_IntVector_Intrinsics_vec256
   tmp1 =
     Lib_IntVector_Intrinsics_vec256_and(l0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec256 l1 = Lib_IntVector_Intrinsics_vec256_add64(v22, c1);
   Lib_IntVector_Intrinsics_vec256
   tmp2 =
     Lib_IntVector_Intrinsics_vec256_and(l1,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec256 l2 = Lib_IntVector_Intrinsics_vec256_add64(v23, c2);
   Lib_IntVector_Intrinsics_vec256
   tmp3 =
     Lib_IntVector_Intrinsics_vec256_and(l2,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec256 l3 = Lib_IntVector_Intrinsics_vec256_add64(v24, c3);
   Lib_IntVector_Intrinsics_vec256
   tmp4 =
     Lib_IntVector_Intrinsics_vec256_and(l3,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec256
   o00 =
     Lib_IntVector_Intrinsics_vec256_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec256_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec256_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec256 o1 = tmp1;
   Lib_IntVector_Intrinsics_vec256 o2 = tmp2;
   Lib_IntVector_Intrinsics_vec256 o3 = tmp3;
@@ -661,7 +621,7 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
 void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key)
 {
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   uint8_t *kr = key;
   acc[0U] = Lib_IntVector_Intrinsics_vec256_zero;
   acc[1U] = Lib_IntVector_Intrinsics_vec256_zero;
@@ -670,41 +630,38 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   acc[4U] = Lib_IntVector_Intrinsics_vec256_zero;
   uint64_t u0 = load64_le(kr);
   uint64_t lo = u0;
-  uint64_t u = load64_le(kr + (uint32_t)8U);
+  uint64_t u = load64_le(kr + 8U);
   uint64_t hi = u;
-  uint64_t mask0 = (uint64_t)0x0ffffffc0fffffffU;
-  uint64_t mask1 = (uint64_t)0x0ffffffc0ffffffcU;
+  uint64_t mask0 = 0x0ffffffc0fffffffULL;
+  uint64_t mask1 = 0x0ffffffc0ffffffcULL;
   uint64_t lo1 = lo & mask0;
   uint64_t hi1 = hi & mask1;
   Lib_IntVector_Intrinsics_vec256 *r = pre;
-  Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec256 *rn = pre + (uint32_t)10U;
-  Lib_IntVector_Intrinsics_vec256 *rn_5 = pre + (uint32_t)15U;
+  Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
+  Lib_IntVector_Intrinsics_vec256 *rn = pre + 10U;
+  Lib_IntVector_Intrinsics_vec256 *rn_5 = pre + 15U;
   Lib_IntVector_Intrinsics_vec256 r_vec0 = Lib_IntVector_Intrinsics_vec256_load64(lo1);
   Lib_IntVector_Intrinsics_vec256 r_vec1 = Lib_IntVector_Intrinsics_vec256_load64(hi1);
   Lib_IntVector_Intrinsics_vec256
   f00 =
     Lib_IntVector_Intrinsics_vec256_and(r_vec0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f15 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0, 26U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f20 =
-    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0, 52U),
       Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(r_vec1,
-          Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec256
   f30 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, 14U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
-  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, (uint32_t)40U);
+  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, 40U);
   Lib_IntVector_Intrinsics_vec256 f0 = f00;
   Lib_IntVector_Intrinsics_vec256 f1 = f15;
   Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -720,11 +677,11 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 f220 = r[2U];
   Lib_IntVector_Intrinsics_vec256 f230 = r[3U];
   Lib_IntVector_Intrinsics_vec256 f240 = r[4U];
-  r5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f200, (uint64_t)5U);
-  r5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f210, (uint64_t)5U);
-  r5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f220, (uint64_t)5U);
-  r5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f230, (uint64_t)5U);
-  r5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f240, (uint64_t)5U);
+  r5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f200, 5ULL);
+  r5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f210, 5ULL);
+  r5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f220, 5ULL);
+  r5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f230, 5ULL);
+  r5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f240, 5ULL);
   Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec256 r10 = r[1U];
   Lib_IntVector_Intrinsics_vec256 r20 = r[2U];
@@ -829,37 +786,30 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 t20 = a240;
   Lib_IntVector_Intrinsics_vec256 t30 = a340;
   Lib_IntVector_Intrinsics_vec256 t40 = a440;
-  Lib_IntVector_Intrinsics_vec256
-  mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, 26U);
+  Lib_IntVector_Intrinsics_vec256 z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 26U);
   Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask260);
   Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask260);
   Lib_IntVector_Intrinsics_vec256 x10 = Lib_IntVector_Intrinsics_vec256_add64(t10, z00);
   Lib_IntVector_Intrinsics_vec256 x40 = Lib_IntVector_Intrinsics_vec256_add64(t40, z10);
-  Lib_IntVector_Intrinsics_vec256
-  z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, 26U);
+  Lib_IntVector_Intrinsics_vec256 z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, 26U);
+  Lib_IntVector_Intrinsics_vec256 t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z110, t5);
   Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask260);
   Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask260);
   Lib_IntVector_Intrinsics_vec256 x20 = Lib_IntVector_Intrinsics_vec256_add64(t20, z010);
   Lib_IntVector_Intrinsics_vec256 x010 = Lib_IntVector_Intrinsics_vec256_add64(x00, z12);
+  Lib_IntVector_Intrinsics_vec256 z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, 26U);
   Lib_IntVector_Intrinsics_vec256
-  z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, (uint32_t)26U);
+  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, 26U);
   Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask260);
   Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask260);
   Lib_IntVector_Intrinsics_vec256 x310 = Lib_IntVector_Intrinsics_vec256_add64(x30, z020);
   Lib_IntVector_Intrinsics_vec256 x120 = Lib_IntVector_Intrinsics_vec256_add64(x110, z130);
   Lib_IntVector_Intrinsics_vec256
-  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, (uint32_t)26U);
+  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, 26U);
   Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask260);
   Lib_IntVector_Intrinsics_vec256 x420 = Lib_IntVector_Intrinsics_vec256_add64(x410, z030);
   Lib_IntVector_Intrinsics_vec256 o00 = x020;
@@ -877,11 +827,11 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 f221 = rn[2U];
   Lib_IntVector_Intrinsics_vec256 f231 = rn[3U];
   Lib_IntVector_Intrinsics_vec256 f241 = rn[4U];
-  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f201, (uint64_t)5U);
-  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f211, (uint64_t)5U);
-  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f221, (uint64_t)5U);
-  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f231, (uint64_t)5U);
-  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f241, (uint64_t)5U);
+  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f201, 5ULL);
+  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f211, 5ULL);
+  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f221, 5ULL);
+  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f231, 5ULL);
+  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f241, 5ULL);
   Lib_IntVector_Intrinsics_vec256 r00 = rn[0U];
   Lib_IntVector_Intrinsics_vec256 r1 = rn[1U];
   Lib_IntVector_Intrinsics_vec256 r2 = rn[2U];
@@ -980,37 +930,28 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 t2 = a24;
   Lib_IntVector_Intrinsics_vec256 t3 = a34;
   Lib_IntVector_Intrinsics_vec256 t4 = a44;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z120 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z120);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1028,47 +969,43 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 f22 = rn[2U];
   Lib_IntVector_Intrinsics_vec256 f23 = rn[3U];
   Lib_IntVector_Intrinsics_vec256 f24 = rn[4U];
-  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f202, (uint64_t)5U);
-  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f21, (uint64_t)5U);
-  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f22, (uint64_t)5U);
-  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f23, (uint64_t)5U);
-  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f24, (uint64_t)5U);
+  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f202, 5ULL);
+  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f21, 5ULL);
+  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f22, 5ULL);
+  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f23, 5ULL);
+  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f24, 5ULL);
 }
 
 void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *text)
 {
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
   uint64_t u0 = load64_le(text);
   uint64_t lo = u0;
-  uint64_t u = load64_le(text + (uint32_t)8U);
+  uint64_t u = load64_le(text + 8U);
   uint64_t hi = u;
   Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
   Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
   Lib_IntVector_Intrinsics_vec256
   f010 =
     Lib_IntVector_Intrinsics_vec256_and(f0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f110 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f20 =
-    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
       Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-          Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec256
   f30 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
   Lib_IntVector_Intrinsics_vec256 f01 = f010;
   Lib_IntVector_Intrinsics_vec256 f111 = f110;
   Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -1079,12 +1016,12 @@ void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, ui
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
   Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
   Lib_IntVector_Intrinsics_vec256 *r = pre;
-  Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
   Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1199,37 +1136,28 @@ void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, ui
   Lib_IntVector_Intrinsics_vec256 t2 = a26;
   Lib_IntVector_Intrinsics_vec256 t3 = a36;
   Lib_IntVector_Intrinsics_vec256 t4 = a46;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1251,54 +1179,48 @@ Hacl_Poly1305_256_poly1305_update(
   uint8_t *text
 )
 {
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  uint32_t sz_block = (uint32_t)64U;
+  uint32_t sz_block = 64U;
   uint32_t len0 = len / sz_block * sz_block;
   uint8_t *t0 = text;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)64U;
+    uint32_t bs = 64U;
     uint8_t *text0 = t0;
     Hacl_Impl_Poly1305_Field32xN_256_load_acc4(acc, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t0 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
       Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block);
+      Lib_IntVector_Intrinsics_vec256 hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + 32U);
       Lib_IntVector_Intrinsics_vec256
-      hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U);
-      Lib_IntVector_Intrinsics_vec256
-      mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
+      mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
       Lib_IntVector_Intrinsics_vec256
       m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
       Lib_IntVector_Intrinsics_vec256
       m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
-      Lib_IntVector_Intrinsics_vec256
-      m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U);
-      Lib_IntVector_Intrinsics_vec256
-      m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U);
+      Lib_IntVector_Intrinsics_vec256 m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, 48U);
+      Lib_IntVector_Intrinsics_vec256 m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, 48U);
       Lib_IntVector_Intrinsics_vec256
       m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t010 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t30 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
-      Lib_IntVector_Intrinsics_vec256
-      t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)4U);
+      Lib_IntVector_Intrinsics_vec256 t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 4U);
       Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask260);
       Lib_IntVector_Intrinsics_vec256
-      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, (uint32_t)26U);
+      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, 26U);
       Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t10, mask260);
       Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)30U);
+      Lib_IntVector_Intrinsics_vec256 t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 30U);
       Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256 o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, 40U);
       Lib_IntVector_Intrinsics_vec256 o00 = o5;
       Lib_IntVector_Intrinsics_vec256 o11 = o10;
       Lib_IntVector_Intrinsics_vec256 o21 = o20;
@@ -1309,12 +1231,12 @@ Hacl_Poly1305_256_poly1305_update(
       e[2U] = o21;
       e[3U] = o31;
       e[4U] = o41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
       Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec256 *rn = pre + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec256 *rn5 = pre + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec256 *rn = pre + 10U;
+      Lib_IntVector_Intrinsics_vec256 *rn5 = pre + 15U;
       Lib_IntVector_Intrinsics_vec256 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec256 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec256 r2 = rn[2U];
@@ -1419,37 +1341,28 @@ Hacl_Poly1305_256_poly1305_update(
       Lib_IntVector_Intrinsics_vec256 t2 = a24;
       Lib_IntVector_Intrinsics_vec256 t3 = a34;
       Lib_IntVector_Intrinsics_vec256 t4 = a44;
-      Lib_IntVector_Intrinsics_vec256
-      mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec256
-      z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec256
-      z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
       Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec256
-      z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec256
-      z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec256 o01 = x02;
@@ -1487,41 +1400,37 @@ Hacl_Poly1305_256_poly1305_update(
   }
   uint32_t len1 = len - len0;
   uint8_t *t1 = text + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t1 + i * (uint32_t)16U;
+    uint8_t *block = t1 + i * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -1532,12 +1441,12 @@ Hacl_Poly1305_256_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
     Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
     Lib_IntVector_Intrinsics_vec256 *r = pre;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1652,37 +1561,28 @@ Hacl_Poly1305_256_poly1305_update(
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1696,41 +1596,37 @@ Hacl_Poly1305_256_poly1305_update(
     acc[3U] = o3;
     acc[4U] = o4;
   }
-  if (rem > (uint32_t)0U)
+  if (rem > 0U)
   {
-    uint8_t *last = t1 + nb * (uint32_t)16U;
+    uint8_t *last = t1 + nb * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -1741,12 +1637,12 @@ Hacl_Poly1305_256_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem * 8U % 26U;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
-    Lib_IntVector_Intrinsics_vec256 fi = e[rem * (uint32_t)8U / (uint32_t)26U];
-    e[rem * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec256 fi = e[rem * 8U / 26U];
+    e[rem * 8U / 26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
     Lib_IntVector_Intrinsics_vec256 *r = pre;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1861,37 +1757,28 @@ Hacl_Poly1305_256_poly1305_update(
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1916,7 +1803,7 @@ Hacl_Poly1305_256_poly1305_finish(
 )
 {
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  uint8_t *ks = key + (uint32_t)16U;
+  uint8_t *ks = key + 16U;
   Lib_IntVector_Intrinsics_vec256 f0 = acc[0U];
   Lib_IntVector_Intrinsics_vec256 f13 = acc[1U];
   Lib_IntVector_Intrinsics_vec256 f23 = acc[2U];
@@ -1927,41 +1814,36 @@ Hacl_Poly1305_256_poly1305_finish(
   Lib_IntVector_Intrinsics_vec256
   tmp00 =
     Lib_IntVector_Intrinsics_vec256_and(l0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c00 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c00 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec256 l1 = Lib_IntVector_Intrinsics_vec256_add64(f13, c00);
   Lib_IntVector_Intrinsics_vec256
   tmp10 =
     Lib_IntVector_Intrinsics_vec256_and(l1,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c10 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c10 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec256 l2 = Lib_IntVector_Intrinsics_vec256_add64(f23, c10);
   Lib_IntVector_Intrinsics_vec256
   tmp20 =
     Lib_IntVector_Intrinsics_vec256_and(l2,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c20 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c20 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec256 l3 = Lib_IntVector_Intrinsics_vec256_add64(f33, c20);
   Lib_IntVector_Intrinsics_vec256
   tmp30 =
     Lib_IntVector_Intrinsics_vec256_and(l3,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c30 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c30 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec256 l4 = Lib_IntVector_Intrinsics_vec256_add64(f40, c30);
   Lib_IntVector_Intrinsics_vec256
   tmp40 =
     Lib_IntVector_Intrinsics_vec256_and(l4,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c40 = Lib_IntVector_Intrinsics_vec256_shift_right64(l4, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c40 = Lib_IntVector_Intrinsics_vec256_shift_right64(l4, 26U);
   Lib_IntVector_Intrinsics_vec256
   f010 =
     Lib_IntVector_Intrinsics_vec256_add64(tmp00,
-      Lib_IntVector_Intrinsics_vec256_smul64(c40, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec256_smul64(c40, 5ULL));
   Lib_IntVector_Intrinsics_vec256 f110 = tmp10;
   Lib_IntVector_Intrinsics_vec256 f210 = tmp20;
   Lib_IntVector_Intrinsics_vec256 f310 = tmp30;
@@ -1971,49 +1853,42 @@ Hacl_Poly1305_256_poly1305_finish(
   Lib_IntVector_Intrinsics_vec256
   tmp0 =
     Lib_IntVector_Intrinsics_vec256_and(l,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec256 l5 = Lib_IntVector_Intrinsics_vec256_add64(f110, c0);
   Lib_IntVector_Intrinsics_vec256
   tmp1 =
     Lib_IntVector_Intrinsics_vec256_and(l5,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l5, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l5, 26U);
   Lib_IntVector_Intrinsics_vec256 l6 = Lib_IntVector_Intrinsics_vec256_add64(f210, c1);
   Lib_IntVector_Intrinsics_vec256
   tmp2 =
     Lib_IntVector_Intrinsics_vec256_and(l6,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l6, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l6, 26U);
   Lib_IntVector_Intrinsics_vec256 l7 = Lib_IntVector_Intrinsics_vec256_add64(f310, c2);
   Lib_IntVector_Intrinsics_vec256
   tmp3 =
     Lib_IntVector_Intrinsics_vec256_and(l7,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l7, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l7, 26U);
   Lib_IntVector_Intrinsics_vec256 l8 = Lib_IntVector_Intrinsics_vec256_add64(f410, c3);
   Lib_IntVector_Intrinsics_vec256
   tmp4 =
     Lib_IntVector_Intrinsics_vec256_and(l8,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l8, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l8, 26U);
   Lib_IntVector_Intrinsics_vec256
   f02 =
     Lib_IntVector_Intrinsics_vec256_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec256_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec256_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec256 f12 = tmp1;
   Lib_IntVector_Intrinsics_vec256 f22 = tmp2;
   Lib_IntVector_Intrinsics_vec256 f32 = tmp3;
   Lib_IntVector_Intrinsics_vec256 f42 = tmp4;
-  Lib_IntVector_Intrinsics_vec256
-  mh = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  ml = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffffbU);
+  Lib_IntVector_Intrinsics_vec256 mh = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 ml = Lib_IntVector_Intrinsics_vec256_load64(0x3fffffbULL);
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_eq64(f42, mh);
   Lib_IntVector_Intrinsics_vec256
   mask1 =
@@ -2053,29 +1928,29 @@ Hacl_Poly1305_256_poly1305_finish(
   Lib_IntVector_Intrinsics_vec256 f2 = acc[2U];
   Lib_IntVector_Intrinsics_vec256 f3 = acc[3U];
   Lib_IntVector_Intrinsics_vec256 f4 = acc[4U];
-  uint64_t f01 = Lib_IntVector_Intrinsics_vec256_extract64(f00, (uint32_t)0U);
-  uint64_t f112 = Lib_IntVector_Intrinsics_vec256_extract64(f1, (uint32_t)0U);
-  uint64_t f212 = Lib_IntVector_Intrinsics_vec256_extract64(f2, (uint32_t)0U);
-  uint64_t f312 = Lib_IntVector_Intrinsics_vec256_extract64(f3, (uint32_t)0U);
-  uint64_t f41 = Lib_IntVector_Intrinsics_vec256_extract64(f4, (uint32_t)0U);
-  uint64_t lo = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U;
-  uint64_t hi = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U;
+  uint64_t f01 = Lib_IntVector_Intrinsics_vec256_extract64(f00, 0U);
+  uint64_t f112 = Lib_IntVector_Intrinsics_vec256_extract64(f1, 0U);
+  uint64_t f212 = Lib_IntVector_Intrinsics_vec256_extract64(f2, 0U);
+  uint64_t f312 = Lib_IntVector_Intrinsics_vec256_extract64(f3, 0U);
+  uint64_t f41 = Lib_IntVector_Intrinsics_vec256_extract64(f4, 0U);
+  uint64_t lo = (f01 | f112 << 26U) | f212 << 52U;
+  uint64_t hi = (f212 >> 12U | f312 << 14U) | f41 << 40U;
   uint64_t f10 = lo;
   uint64_t f11 = hi;
   uint64_t u0 = load64_le(ks);
   uint64_t lo0 = u0;
-  uint64_t u = load64_le(ks + (uint32_t)8U);
+  uint64_t u = load64_le(ks + 8U);
   uint64_t hi0 = u;
   uint64_t f20 = lo0;
   uint64_t f21 = hi0;
   uint64_t r0 = f10 + f20;
   uint64_t r1 = f11 + f21;
-  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U;
+  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> 63U;
   uint64_t r11 = r1 + c;
   uint64_t f30 = r0;
   uint64_t f31 = r11;
   store64_le(tag, f30);
-  store64_le(tag + (uint32_t)8U, f31);
+  store64_le(tag + 8U, f31);
 }
 
 void Hacl_Poly1305_256_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
diff --git a/src/msvc/Hacl_Poly1305_32.c b/src/msvc/Hacl_Poly1305_32.c
index 5192559b..9761e157 100644
--- a/src/msvc/Hacl_Poly1305_32.c
+++ b/src/msvc/Hacl_Poly1305_32.c
@@ -28,32 +28,32 @@
 void Hacl_Poly1305_32_poly1305_init(uint64_t *ctx, uint8_t *key)
 {
   uint64_t *acc = ctx;
-  uint64_t *pre = ctx + (uint32_t)5U;
+  uint64_t *pre = ctx + 5U;
   uint8_t *kr = key;
-  acc[0U] = (uint64_t)0U;
-  acc[1U] = (uint64_t)0U;
-  acc[2U] = (uint64_t)0U;
-  acc[3U] = (uint64_t)0U;
-  acc[4U] = (uint64_t)0U;
+  acc[0U] = 0ULL;
+  acc[1U] = 0ULL;
+  acc[2U] = 0ULL;
+  acc[3U] = 0ULL;
+  acc[4U] = 0ULL;
   uint64_t u0 = load64_le(kr);
   uint64_t lo = u0;
-  uint64_t u = load64_le(kr + (uint32_t)8U);
+  uint64_t u = load64_le(kr + 8U);
   uint64_t hi = u;
-  uint64_t mask0 = (uint64_t)0x0ffffffc0fffffffU;
-  uint64_t mask1 = (uint64_t)0x0ffffffc0ffffffcU;
+  uint64_t mask0 = 0x0ffffffc0fffffffULL;
+  uint64_t mask1 = 0x0ffffffc0ffffffcULL;
   uint64_t lo1 = lo & mask0;
   uint64_t hi1 = hi & mask1;
   uint64_t *r = pre;
-  uint64_t *r5 = pre + (uint32_t)5U;
-  uint64_t *rn = pre + (uint32_t)10U;
-  uint64_t *rn_5 = pre + (uint32_t)15U;
+  uint64_t *r5 = pre + 5U;
+  uint64_t *rn = pre + 10U;
+  uint64_t *rn_5 = pre + 15U;
   uint64_t r_vec0 = lo1;
   uint64_t r_vec1 = hi1;
-  uint64_t f00 = r_vec0 & (uint64_t)0x3ffffffU;
-  uint64_t f10 = r_vec0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-  uint64_t f20 = r_vec0 >> (uint32_t)52U | (r_vec1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-  uint64_t f30 = r_vec1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-  uint64_t f40 = r_vec1 >> (uint32_t)40U;
+  uint64_t f00 = r_vec0 & 0x3ffffffULL;
+  uint64_t f10 = r_vec0 >> 26U & 0x3ffffffULL;
+  uint64_t f20 = r_vec0 >> 52U | (r_vec1 & 0x3fffULL) << 12U;
+  uint64_t f30 = r_vec1 >> 14U & 0x3ffffffULL;
+  uint64_t f40 = r_vec1 >> 40U;
   uint64_t f0 = f00;
   uint64_t f1 = f10;
   uint64_t f2 = f20;
@@ -69,11 +69,11 @@ void Hacl_Poly1305_32_poly1305_init(uint64_t *ctx, uint8_t *key)
   uint64_t f22 = r[2U];
   uint64_t f23 = r[3U];
   uint64_t f24 = r[4U];
-  r5[0U] = f200 * (uint64_t)5U;
-  r5[1U] = f21 * (uint64_t)5U;
-  r5[2U] = f22 * (uint64_t)5U;
-  r5[3U] = f23 * (uint64_t)5U;
-  r5[4U] = f24 * (uint64_t)5U;
+  r5[0U] = f200 * 5ULL;
+  r5[1U] = f21 * 5ULL;
+  r5[2U] = f22 * 5ULL;
+  r5[3U] = f23 * 5ULL;
+  r5[4U] = f24 * 5ULL;
   rn[0U] = r[0U];
   rn[1U] = r[1U];
   rn[2U] = r[2U];
@@ -88,20 +88,20 @@ void Hacl_Poly1305_32_poly1305_init(uint64_t *ctx, uint8_t *key)
 
 void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
 {
-  uint64_t *pre = ctx + (uint32_t)5U;
+  uint64_t *pre = ctx + 5U;
   uint64_t *acc = ctx;
   uint64_t e[5U] = { 0U };
   uint64_t u0 = load64_le(text);
   uint64_t lo = u0;
-  uint64_t u = load64_le(text + (uint32_t)8U);
+  uint64_t u = load64_le(text + 8U);
   uint64_t hi = u;
   uint64_t f0 = lo;
   uint64_t f1 = hi;
-  uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-  uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-  uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-  uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-  uint64_t f40 = f1 >> (uint32_t)40U;
+  uint64_t f010 = f0 & 0x3ffffffULL;
+  uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+  uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+  uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+  uint64_t f40 = f1 >> 40U;
   uint64_t f01 = f010;
   uint64_t f111 = f110;
   uint64_t f2 = f20;
@@ -112,12 +112,12 @@ void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   uint64_t mask = b;
   uint64_t f4 = e[4U];
   e[4U] = f4 | mask;
   uint64_t *r = pre;
-  uint64_t *r5 = pre + (uint32_t)5U;
+  uint64_t *r5 = pre + 5U;
   uint64_t r0 = r[0U];
   uint64_t r1 = r[1U];
   uint64_t r2 = r[2U];
@@ -172,28 +172,28 @@ void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
   uint64_t t2 = a26;
   uint64_t t3 = a36;
   uint64_t t4 = a46;
-  uint64_t mask26 = (uint64_t)0x3ffffffU;
-  uint64_t z0 = t0 >> (uint32_t)26U;
-  uint64_t z1 = t3 >> (uint32_t)26U;
+  uint64_t mask26 = 0x3ffffffULL;
+  uint64_t z0 = t0 >> 26U;
+  uint64_t z1 = t3 >> 26U;
   uint64_t x0 = t0 & mask26;
   uint64_t x3 = t3 & mask26;
   uint64_t x1 = t1 + z0;
   uint64_t x4 = t4 + z1;
-  uint64_t z01 = x1 >> (uint32_t)26U;
-  uint64_t z11 = x4 >> (uint32_t)26U;
-  uint64_t t = z11 << (uint32_t)2U;
+  uint64_t z01 = x1 >> 26U;
+  uint64_t z11 = x4 >> 26U;
+  uint64_t t = z11 << 2U;
   uint64_t z12 = z11 + t;
   uint64_t x11 = x1 & mask26;
   uint64_t x41 = x4 & mask26;
   uint64_t x2 = t2 + z01;
   uint64_t x01 = x0 + z12;
-  uint64_t z02 = x2 >> (uint32_t)26U;
-  uint64_t z13 = x01 >> (uint32_t)26U;
+  uint64_t z02 = x2 >> 26U;
+  uint64_t z13 = x01 >> 26U;
   uint64_t x21 = x2 & mask26;
   uint64_t x02 = x01 & mask26;
   uint64_t x31 = x3 + z02;
   uint64_t x12 = x11 + z13;
-  uint64_t z03 = x31 >> (uint32_t)26U;
+  uint64_t z03 = x31 >> 26U;
   uint64_t x32 = x31 & mask26;
   uint64_t x42 = x41 + z03;
   uint64_t o0 = x02;
@@ -210,25 +210,25 @@ void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
 
 void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
 {
-  uint64_t *pre = ctx + (uint32_t)5U;
+  uint64_t *pre = ctx + 5U;
   uint64_t *acc = ctx;
-  uint32_t nb = len / (uint32_t)16U;
-  uint32_t rem = len % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len / 16U;
+  uint32_t rem = len % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = text + i * (uint32_t)16U;
+    uint8_t *block = text + i * 16U;
     uint64_t e[5U] = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -239,12 +239,12 @@ void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     uint64_t mask = b;
     uint64_t f4 = e[4U];
     e[4U] = f4 | mask;
     uint64_t *r = pre;
-    uint64_t *r5 = pre + (uint32_t)5U;
+    uint64_t *r5 = pre + 5U;
     uint64_t r0 = r[0U];
     uint64_t r1 = r[1U];
     uint64_t r2 = r[2U];
@@ -299,28 +299,28 @@ void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -334,23 +334,23 @@ void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text
     acc[3U] = o3;
     acc[4U] = o4;
   }
-  if (rem > (uint32_t)0U)
+  if (rem > 0U)
   {
-    uint8_t *last = text + nb * (uint32_t)16U;
+    uint8_t *last = text + nb * 16U;
     uint64_t e[5U] = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -361,12 +361,12 @@ void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem * 8U % 26U;
     uint64_t mask = b;
-    uint64_t fi = e[rem * (uint32_t)8U / (uint32_t)26U];
-    e[rem * (uint32_t)8U / (uint32_t)26U] = fi | mask;
+    uint64_t fi = e[rem * 8U / 26U];
+    e[rem * 8U / 26U] = fi | mask;
     uint64_t *r = pre;
-    uint64_t *r5 = pre + (uint32_t)5U;
+    uint64_t *r5 = pre + 5U;
     uint64_t r0 = r[0U];
     uint64_t r1 = r[1U];
     uint64_t r2 = r[2U];
@@ -421,28 +421,28 @@ void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -462,54 +462,54 @@ void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text
 void Hacl_Poly1305_32_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx)
 {
   uint64_t *acc = ctx;
-  uint8_t *ks = key + (uint32_t)16U;
+  uint8_t *ks = key + 16U;
   uint64_t f0 = acc[0U];
   uint64_t f13 = acc[1U];
   uint64_t f23 = acc[2U];
   uint64_t f33 = acc[3U];
   uint64_t f40 = acc[4U];
-  uint64_t l0 = f0 + (uint64_t)0U;
-  uint64_t tmp00 = l0 & (uint64_t)0x3ffffffU;
-  uint64_t c00 = l0 >> (uint32_t)26U;
+  uint64_t l0 = f0 + 0ULL;
+  uint64_t tmp00 = l0 & 0x3ffffffULL;
+  uint64_t c00 = l0 >> 26U;
   uint64_t l1 = f13 + c00;
-  uint64_t tmp10 = l1 & (uint64_t)0x3ffffffU;
-  uint64_t c10 = l1 >> (uint32_t)26U;
+  uint64_t tmp10 = l1 & 0x3ffffffULL;
+  uint64_t c10 = l1 >> 26U;
   uint64_t l2 = f23 + c10;
-  uint64_t tmp20 = l2 & (uint64_t)0x3ffffffU;
-  uint64_t c20 = l2 >> (uint32_t)26U;
+  uint64_t tmp20 = l2 & 0x3ffffffULL;
+  uint64_t c20 = l2 >> 26U;
   uint64_t l3 = f33 + c20;
-  uint64_t tmp30 = l3 & (uint64_t)0x3ffffffU;
-  uint64_t c30 = l3 >> (uint32_t)26U;
+  uint64_t tmp30 = l3 & 0x3ffffffULL;
+  uint64_t c30 = l3 >> 26U;
   uint64_t l4 = f40 + c30;
-  uint64_t tmp40 = l4 & (uint64_t)0x3ffffffU;
-  uint64_t c40 = l4 >> (uint32_t)26U;
-  uint64_t f010 = tmp00 + c40 * (uint64_t)5U;
+  uint64_t tmp40 = l4 & 0x3ffffffULL;
+  uint64_t c40 = l4 >> 26U;
+  uint64_t f010 = tmp00 + c40 * 5ULL;
   uint64_t f110 = tmp10;
   uint64_t f210 = tmp20;
   uint64_t f310 = tmp30;
   uint64_t f410 = tmp40;
-  uint64_t l = f010 + (uint64_t)0U;
-  uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
-  uint64_t c0 = l >> (uint32_t)26U;
+  uint64_t l = f010 + 0ULL;
+  uint64_t tmp0 = l & 0x3ffffffULL;
+  uint64_t c0 = l >> 26U;
   uint64_t l5 = f110 + c0;
-  uint64_t tmp1 = l5 & (uint64_t)0x3ffffffU;
-  uint64_t c1 = l5 >> (uint32_t)26U;
+  uint64_t tmp1 = l5 & 0x3ffffffULL;
+  uint64_t c1 = l5 >> 26U;
   uint64_t l6 = f210 + c1;
-  uint64_t tmp2 = l6 & (uint64_t)0x3ffffffU;
-  uint64_t c2 = l6 >> (uint32_t)26U;
+  uint64_t tmp2 = l6 & 0x3ffffffULL;
+  uint64_t c2 = l6 >> 26U;
   uint64_t l7 = f310 + c2;
-  uint64_t tmp3 = l7 & (uint64_t)0x3ffffffU;
-  uint64_t c3 = l7 >> (uint32_t)26U;
+  uint64_t tmp3 = l7 & 0x3ffffffULL;
+  uint64_t c3 = l7 >> 26U;
   uint64_t l8 = f410 + c3;
-  uint64_t tmp4 = l8 & (uint64_t)0x3ffffffU;
-  uint64_t c4 = l8 >> (uint32_t)26U;
-  uint64_t f02 = tmp0 + c4 * (uint64_t)5U;
+  uint64_t tmp4 = l8 & 0x3ffffffULL;
+  uint64_t c4 = l8 >> 26U;
+  uint64_t f02 = tmp0 + c4 * 5ULL;
   uint64_t f12 = tmp1;
   uint64_t f22 = tmp2;
   uint64_t f32 = tmp3;
   uint64_t f42 = tmp4;
-  uint64_t mh = (uint64_t)0x3ffffffU;
-  uint64_t ml = (uint64_t)0x3fffffbU;
+  uint64_t mh = 0x3ffffffULL;
+  uint64_t ml = 0x3fffffbULL;
   uint64_t mask = FStar_UInt64_eq_mask(f42, mh);
   uint64_t mask1 = mask & FStar_UInt64_eq_mask(f32, mh);
   uint64_t mask2 = mask1 & FStar_UInt64_eq_mask(f22, mh);
@@ -542,24 +542,24 @@ void Hacl_Poly1305_32_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx)
   uint64_t f212 = f2;
   uint64_t f312 = f3;
   uint64_t f41 = f4;
-  uint64_t lo = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U;
-  uint64_t hi = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U;
+  uint64_t lo = (f01 | f112 << 26U) | f212 << 52U;
+  uint64_t hi = (f212 >> 12U | f312 << 14U) | f41 << 40U;
   uint64_t f10 = lo;
   uint64_t f11 = hi;
   uint64_t u0 = load64_le(ks);
   uint64_t lo0 = u0;
-  uint64_t u = load64_le(ks + (uint32_t)8U);
+  uint64_t u = load64_le(ks + 8U);
   uint64_t hi0 = u;
   uint64_t f20 = lo0;
   uint64_t f21 = hi0;
   uint64_t r0 = f10 + f20;
   uint64_t r1 = f11 + f21;
-  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U;
+  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> 63U;
   uint64_t r11 = r1 + c;
   uint64_t f30 = r0;
   uint64_t f31 = r11;
   store64_le(tag, f30);
-  store64_le(tag + (uint32_t)8U, f31);
+  store64_le(tag + 8U, f31);
 }
 
 void Hacl_Poly1305_32_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
diff --git a/src/msvc/Hacl_RSAPSS.c b/src/msvc/Hacl_RSAPSS.c
index 084f10b3..44dd702c 100644
--- a/src/msvc/Hacl_RSAPSS.c
+++ b/src/msvc/Hacl_RSAPSS.c
@@ -35,51 +35,51 @@ static inline uint32_t hash_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)20U;
+        return 20U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     default:
       {
@@ -126,48 +126,48 @@ mgf_hash(
   uint8_t *res
 )
 {
-  KRML_CHECK_SIZE(sizeof (uint8_t), len + (uint32_t)4U);
-  uint8_t *mgfseed_counter = (uint8_t *)alloca((len + (uint32_t)4U) * sizeof (uint8_t));
-  memset(mgfseed_counter, 0U, (len + (uint32_t)4U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), len + 4U);
+  uint8_t *mgfseed_counter = (uint8_t *)alloca((len + 4U) * sizeof (uint8_t));
+  memset(mgfseed_counter, 0U, (len + 4U) * sizeof (uint8_t));
   memcpy(mgfseed_counter, mgfseed, len * sizeof (uint8_t));
   uint32_t hLen = hash_len(a);
-  uint32_t n = (maskLen - (uint32_t)1U) / hLen + (uint32_t)1U;
+  uint32_t n = (maskLen - 1U) / hLen + 1U;
   uint32_t accLen = n * hLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), accLen);
   uint8_t *acc = (uint8_t *)alloca(accLen * sizeof (uint8_t));
   memset(acc, 0U, accLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
     uint8_t *acc_i = acc + i * hLen;
     uint8_t *c = mgfseed_counter + len;
-    c[0U] = (uint8_t)(i >> (uint32_t)24U);
-    c[1U] = (uint8_t)(i >> (uint32_t)16U);
-    c[2U] = (uint8_t)(i >> (uint32_t)8U);
+    c[0U] = (uint8_t)(i >> 24U);
+    c[1U] = (uint8_t)(i >> 16U);
+    c[2U] = (uint8_t)(i >> 8U);
     c[3U] = (uint8_t)i;
-    hash(a, acc_i, len + (uint32_t)4U, mgfseed_counter);
+    hash(a, acc_i, len + 4U, mgfseed_counter);
   }
   memcpy(res, acc, maskLen * sizeof (uint8_t));
 }
 
 static inline uint64_t check_num_bits_u64(uint32_t bs, uint64_t *b)
 {
-  uint32_t bLen = (bs - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  if (bs == (uint32_t)64U * bLen)
+  uint32_t bLen = (bs - 1U) / 64U + 1U;
+  if (bs == 64U * bLen)
   {
-    return (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    return 0xFFFFFFFFFFFFFFFFULL;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
   uint64_t *b2 = (uint64_t *)alloca(bLen * sizeof (uint64_t));
   memset(b2, 0U, bLen * sizeof (uint64_t));
-  uint32_t i0 = bs / (uint32_t)64U;
-  uint32_t j = bs % (uint32_t)64U;
-  b2[i0] = b2[i0] | (uint64_t)1U << j;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+  uint32_t i0 = bs / 64U;
+  uint32_t j = bs % 64U;
+  b2[i0] = b2[i0] | 1ULL << j;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < bLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t res = acc;
   return res;
@@ -175,21 +175,21 @@ static inline uint64_t check_num_bits_u64(uint32_t bs, uint64_t *b)
 
 static inline uint64_t check_modulus_u64(uint32_t modBits, uint64_t *n)
 {
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint64_t bits0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bits0;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint64_t bits0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bits0;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *b2 = (uint64_t *)alloca(nLen * sizeof (uint64_t));
   memset(b2, 0U, nLen * sizeof (uint64_t));
-  uint32_t i0 = (modBits - (uint32_t)1U) / (uint32_t)64U;
-  uint32_t j = (modBits - (uint32_t)1U) % (uint32_t)64U;
-  b2[i0] = b2[i0] | (uint64_t)1U << j;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < nLen; i++)
+  uint32_t i0 = (modBits - 1U) / 64U;
+  uint32_t j = (modBits - 1U) % 64U;
+  b2[i0] = b2[i0] | 1ULL << j;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < nLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(b2[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t res = acc;
   uint64_t m1 = res;
@@ -199,12 +199,12 @@ static inline uint64_t check_modulus_u64(uint32_t modBits, uint64_t *n)
 
 static inline uint64_t check_exponent_u64(uint32_t eBits, uint64_t *e)
 {
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), eLen);
   uint64_t *bn_zero = (uint64_t *)alloca(eLen * sizeof (uint64_t));
   memset(bn_zero, 0U, eLen * sizeof (uint64_t));
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < eLen; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < eLen; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(e[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -231,39 +231,39 @@ pss_encode(
   KRML_CHECK_SIZE(sizeof (uint8_t), hLen);
   uint8_t *m1Hash = (uint8_t *)alloca(hLen * sizeof (uint8_t));
   memset(m1Hash, 0U, hLen * sizeof (uint8_t));
-  uint32_t m1Len = (uint32_t)8U + hLen + saltLen;
+  uint32_t m1Len = 8U + hLen + saltLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), m1Len);
   uint8_t *m1 = (uint8_t *)alloca(m1Len * sizeof (uint8_t));
   memset(m1, 0U, m1Len * sizeof (uint8_t));
-  hash(a, m1 + (uint32_t)8U, msgLen, msg);
-  memcpy(m1 + (uint32_t)8U + hLen, salt, saltLen * sizeof (uint8_t));
+  hash(a, m1 + 8U, msgLen, msg);
+  memcpy(m1 + 8U + hLen, salt, saltLen * sizeof (uint8_t));
   hash(a, m1Hash, m1Len, m1);
-  uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t dbLen = emLen - hLen - (uint32_t)1U;
+  uint32_t emLen = (emBits - 1U) / 8U + 1U;
+  uint32_t dbLen = emLen - hLen - 1U;
   KRML_CHECK_SIZE(sizeof (uint8_t), dbLen);
   uint8_t *db = (uint8_t *)alloca(dbLen * sizeof (uint8_t));
   memset(db, 0U, dbLen * sizeof (uint8_t));
-  uint32_t last_before_salt = dbLen - saltLen - (uint32_t)1U;
-  db[last_before_salt] = (uint8_t)1U;
-  memcpy(db + last_before_salt + (uint32_t)1U, salt, saltLen * sizeof (uint8_t));
+  uint32_t last_before_salt = dbLen - saltLen - 1U;
+  db[last_before_salt] = 1U;
+  memcpy(db + last_before_salt + 1U, salt, saltLen * sizeof (uint8_t));
   KRML_CHECK_SIZE(sizeof (uint8_t), dbLen);
   uint8_t *dbMask = (uint8_t *)alloca(dbLen * sizeof (uint8_t));
   memset(dbMask, 0U, dbLen * sizeof (uint8_t));
   mgf_hash(a, hLen, m1Hash, dbLen, dbMask);
-  for (uint32_t i = (uint32_t)0U; i < dbLen; i++)
+  for (uint32_t i = 0U; i < dbLen; i++)
   {
     uint8_t *os = db;
-    uint8_t x = db[i] ^ dbMask[i];
+    uint8_t x = (uint32_t)db[i] ^ (uint32_t)dbMask[i];
     os[i] = x;
   }
-  uint32_t msBits = emBits % (uint32_t)8U;
-  if (msBits > (uint32_t)0U)
+  uint32_t msBits = emBits % 8U;
+  if (msBits > 0U)
   {
-    db[0U] = db[0U] & (uint8_t)0xffU >> ((uint32_t)8U - msBits);
+    db[0U] = (uint32_t)db[0U] & 0xffU >> (8U - msBits);
   }
   memcpy(em, db, dbLen * sizeof (uint8_t));
   memcpy(em + dbLen, m1Hash, hLen * sizeof (uint8_t));
-  em[emLen - (uint32_t)1U] = (uint8_t)0xbcU;
+  em[emLen - 1U] = 0xbcU;
 }
 
 static inline bool
@@ -276,105 +276,100 @@ pss_verify(
   uint8_t *em
 )
 {
-  uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t msBits = emBits % (uint32_t)8U;
+  uint32_t emLen = (emBits - 1U) / 8U + 1U;
+  uint32_t msBits = emBits % 8U;
   uint8_t em_0;
-  if (msBits > (uint32_t)0U)
+  if (msBits > 0U)
   {
-    em_0 = em[0U] & (uint8_t)0xffU << msBits;
+    em_0 = (uint32_t)em[0U] & 0xffU << msBits;
   }
   else
   {
-    em_0 = (uint8_t)0U;
+    em_0 = 0U;
   }
-  uint8_t em_last = em[emLen - (uint32_t)1U];
-  if (emLen < saltLen + hash_len(a) + (uint32_t)2U)
+  uint8_t em_last = em[emLen - 1U];
+  if (emLen < saltLen + hash_len(a) + 2U)
   {
     return false;
   }
-  if (!(em_last == (uint8_t)0xbcU && em_0 == (uint8_t)0U))
+  if (!(em_last == 0xbcU && em_0 == 0U))
   {
     return false;
   }
-  uint32_t emLen1 = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t emLen1 = (emBits - 1U) / 8U + 1U;
   uint32_t hLen = hash_len(a);
   KRML_CHECK_SIZE(sizeof (uint8_t), hLen);
   uint8_t *m1Hash0 = (uint8_t *)alloca(hLen * sizeof (uint8_t));
   memset(m1Hash0, 0U, hLen * sizeof (uint8_t));
-  uint32_t dbLen = emLen1 - hLen - (uint32_t)1U;
+  uint32_t dbLen = emLen1 - hLen - 1U;
   uint8_t *maskedDB = em;
   uint8_t *m1Hash = em + dbLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), dbLen);
   uint8_t *dbMask = (uint8_t *)alloca(dbLen * sizeof (uint8_t));
   memset(dbMask, 0U, dbLen * sizeof (uint8_t));
   mgf_hash(a, hLen, m1Hash, dbLen, dbMask);
-  for (uint32_t i = (uint32_t)0U; i < dbLen; i++)
+  for (uint32_t i = 0U; i < dbLen; i++)
   {
     uint8_t *os = dbMask;
-    uint8_t x = dbMask[i] ^ maskedDB[i];
+    uint8_t x = (uint32_t)dbMask[i] ^ (uint32_t)maskedDB[i];
     os[i] = x;
   }
-  uint32_t msBits1 = emBits % (uint32_t)8U;
-  if (msBits1 > (uint32_t)0U)
+  uint32_t msBits1 = emBits % 8U;
+  if (msBits1 > 0U)
   {
-    dbMask[0U] = dbMask[0U] & (uint8_t)0xffU >> ((uint32_t)8U - msBits1);
+    dbMask[0U] = (uint32_t)dbMask[0U] & 0xffU >> (8U - msBits1);
   }
-  uint32_t padLen = emLen1 - saltLen - hLen - (uint32_t)1U;
+  uint32_t padLen = emLen1 - saltLen - hLen - 1U;
   KRML_CHECK_SIZE(sizeof (uint8_t), padLen);
   uint8_t *pad2 = (uint8_t *)alloca(padLen * sizeof (uint8_t));
   memset(pad2, 0U, padLen * sizeof (uint8_t));
-  pad2[padLen - (uint32_t)1U] = (uint8_t)0x01U;
+  pad2[padLen - 1U] = 0x01U;
   uint8_t *pad = dbMask;
   uint8_t *salt = dbMask + padLen;
-  uint8_t res = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < padLen; i++)
+  uint8_t res = 255U;
+  for (uint32_t i = 0U; i < padLen; i++)
   {
     uint8_t uu____0 = FStar_UInt8_eq_mask(pad[i], pad2[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint8_t z = res;
-  if (!(z == (uint8_t)255U))
+  if (!(z == 255U))
   {
     return false;
   }
-  uint32_t m1Len = (uint32_t)8U + hLen + saltLen;
+  uint32_t m1Len = 8U + hLen + saltLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), m1Len);
   uint8_t *m1 = (uint8_t *)alloca(m1Len * sizeof (uint8_t));
   memset(m1, 0U, m1Len * sizeof (uint8_t));
-  hash(a, m1 + (uint32_t)8U, msgLen, msg);
-  memcpy(m1 + (uint32_t)8U + hLen, salt, saltLen * sizeof (uint8_t));
+  hash(a, m1 + 8U, msgLen, msg);
+  memcpy(m1 + 8U + hLen, salt, saltLen * sizeof (uint8_t));
   hash(a, m1Hash0, m1Len, m1);
-  uint8_t res0 = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < hLen; i++)
+  uint8_t res0 = 255U;
+  for (uint32_t i = 0U; i < hLen; i++)
   {
     uint8_t uu____1 = FStar_UInt8_eq_mask(m1Hash0[i], m1Hash[i]);
-    res0 = uu____1 & res0;
+    res0 = (uint32_t)uu____1 & (uint32_t)res0;
   }
   uint8_t z0 = res0;
-  return z0 == (uint8_t)255U;
+  return z0 == 255U;
 }
 
 static inline bool
 load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb, uint64_t *pkey)
 {
-  uint32_t nbLen = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t ebLen = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nbLen = (modBits - 1U) / 8U + 1U;
+  uint32_t ebLen = (eBits - 1U) / 8U + 1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
   uint64_t *n = pkey;
   uint64_t *r2 = pkey + nLen;
   uint64_t *e = pkey + nLen + nLen;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen, nb, n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U)
-    / (uint32_t)64U
-    + (uint32_t)1U,
-    modBits - (uint32_t)1U,
-    n,
-    r2);
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen, eb, e);
   uint64_t m0 = check_modulus_u64(modBits, n);
   uint64_t m1 = check_exponent_u64(eBits, e);
   uint64_t m = m0 & m1;
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static inline bool
@@ -388,16 +383,16 @@ load_skey(
   uint64_t *skey
 )
 {
-  uint32_t dbLen = (dBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t dbLen = (dBits - 1U) / 8U + 1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
   uint32_t pkeyLen = nLen + nLen + eLen;
   uint64_t *pkey = skey;
   uint64_t *d = skey + pkeyLen;
   bool b = load_pkey(modBits, eBits, nb, eb, pkey);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(dbLen, db, d);
   uint64_t m1 = check_exponent_u64(dBits, d);
-  return b && m1 == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return b && m1 == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -435,45 +430,36 @@ Hacl_RSAPSS_rsapss_sign(
 {
   uint32_t hLen = hash_len(a);
   bool
-  b =
-    saltLen
-    <= (uint32_t)0xffffffffU - hLen - (uint32_t)8U
-    &&
-      saltLen
-      + hLen
-      + (uint32_t)2U
-      <= (modBits - (uint32_t)1U - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  b = saltLen <= 0xffffffffU - hLen - 8U && saltLen + hLen + 2U <= (modBits - 1U - 1U) / 8U + 1U;
   if (b)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
     uint64_t *m = (uint64_t *)alloca(nLen * sizeof (uint64_t));
     memset(m, 0U, nLen * sizeof (uint64_t));
-    uint32_t emBits = modBits - (uint32_t)1U;
-    uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+    uint32_t emBits = modBits - 1U;
+    uint32_t emLen = (emBits - 1U) / 8U + 1U;
     KRML_CHECK_SIZE(sizeof (uint8_t), emLen);
     uint8_t *em = (uint8_t *)alloca(emLen * sizeof (uint8_t));
     memset(em, 0U, emLen * sizeof (uint8_t));
     pss_encode(a, saltLen, salt, msgLen, msg, emBits, em);
     Hacl_Bignum_Convert_bn_from_bytes_be_uint64(emLen, em, m);
-    uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t k = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+    uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+    uint32_t k = (modBits - 1U) / 8U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen1);
     uint64_t *s = (uint64_t *)alloca(nLen1 * sizeof (uint64_t));
     memset(s, 0U, nLen1 * sizeof (uint64_t));
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen1);
     uint64_t *m_ = (uint64_t *)alloca(nLen1 * sizeof (uint64_t));
     memset(m_, 0U, nLen1 * sizeof (uint64_t));
-    uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen2 = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
     uint64_t *n = skey;
     uint64_t *r2 = skey + nLen2;
     uint64_t *e = skey + nLen2 + nLen2;
     uint64_t *d = skey + nLen2 + nLen2 + eLen;
     uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((modBits - (uint32_t)1U)
-      / (uint32_t)64U
-      + (uint32_t)1U,
+    Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((modBits - 1U) / 64U + 1U,
       n,
       mu,
       r2,
@@ -482,9 +468,7 @@ Hacl_RSAPSS_rsapss_sign(
       d,
       s);
     uint64_t mu0 = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - (uint32_t)1U)
-      / (uint32_t)64U
-      + (uint32_t)1U,
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - 1U) / 64U + 1U,
       n,
       mu0,
       r2,
@@ -492,22 +476,22 @@ Hacl_RSAPSS_rsapss_sign(
       eBits,
       e,
       m_);
-    uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-    for (uint32_t i = (uint32_t)0U; i < nLen2; i++)
+    uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+    for (uint32_t i = 0U; i < nLen2; i++)
     {
       uint64_t uu____0 = FStar_UInt64_eq_mask(m[i], m_[i]);
       mask = uu____0 & mask;
     }
     uint64_t mask1 = mask;
     uint64_t eq_m = mask1;
-    for (uint32_t i = (uint32_t)0U; i < nLen2; i++)
+    for (uint32_t i = 0U; i < nLen2; i++)
     {
       uint64_t *os = s;
       uint64_t x = s[i];
       uint64_t x0 = eq_m & x;
       os[i] = x0;
     }
-    bool eq_b = eq_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    bool eq_b = eq_m == 0xFFFFFFFFFFFFFFFFULL;
     Hacl_Bignum_Convert_bn_to_bytes_be_uint64(k, s, sgnt);
     bool eq_b0 = eq_b;
     return eq_b0;
@@ -547,42 +531,36 @@ Hacl_RSAPSS_rsapss_verify(
 )
 {
   uint32_t hLen = hash_len(a);
-  bool
-  b =
-    saltLen
-    <= (uint32_t)0xffffffffU - hLen - (uint32_t)8U
-    && sgntLen == (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  bool b = saltLen <= 0xffffffffU - hLen - 8U && sgntLen == (modBits - 1U) / 8U + 1U;
   if (b)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
     uint64_t *m = (uint64_t *)alloca(nLen * sizeof (uint64_t));
     memset(m, 0U, nLen * sizeof (uint64_t));
-    uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t k = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+    uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+    uint32_t k = (modBits - 1U) / 8U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen1);
     uint64_t *s = (uint64_t *)alloca(nLen1 * sizeof (uint64_t));
     memset(s, 0U, nLen1 * sizeof (uint64_t));
     Hacl_Bignum_Convert_bn_from_bytes_be_uint64(k, sgnt, s);
-    uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen2 = (modBits - 1U) / 64U + 1U;
     uint64_t *n = pkey;
     uint64_t *r2 = pkey + nLen2;
     uint64_t *e = pkey + nLen2 + nLen2;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < nLen2; i++)
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < nLen2; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(s[i], n[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(s[i], n[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t mask = acc;
     bool res;
-    if (mask == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+    if (mask == 0xFFFFFFFFFFFFFFFFULL)
     {
       uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-      Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - (uint32_t)1U)
-        / (uint32_t)64U
-        + (uint32_t)1U,
+      Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - 1U) / 64U + 1U,
         n,
         mu,
         r2,
@@ -591,17 +569,17 @@ Hacl_RSAPSS_rsapss_verify(
         e,
         m);
       bool ite;
-      if (!((modBits - (uint32_t)1U) % (uint32_t)8U == (uint32_t)0U))
+      if (!((modBits - 1U) % 8U == 0U))
       {
         ite = true;
       }
       else
       {
-        uint32_t i = (modBits - (uint32_t)1U) / (uint32_t)64U;
-        uint32_t j = (modBits - (uint32_t)1U) % (uint32_t)64U;
+        uint32_t i = (modBits - 1U) / 64U;
+        uint32_t j = (modBits - 1U) % 64U;
         uint64_t tmp = m[i];
-        uint64_t get_bit = tmp >> j & (uint64_t)1U;
-        ite = get_bit == (uint64_t)0U;
+        uint64_t get_bit = tmp >> j & 1ULL;
+        ite = get_bit == 0ULL;
       }
       if (ite)
       {
@@ -620,8 +598,8 @@ Hacl_RSAPSS_rsapss_verify(
     bool b10 = b1;
     if (b10)
     {
-      uint32_t emBits = modBits - (uint32_t)1U;
-      uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+      uint32_t emBits = modBits - 1U;
+      uint32_t emLen = (emBits - 1U) / 8U + 1U;
       KRML_CHECK_SIZE(sizeof (uint8_t), emLen);
       uint8_t *em = (uint8_t *)alloca(emLen * sizeof (uint8_t));
       memset(em, 0U, emLen * sizeof (uint8_t));
@@ -649,15 +627,11 @@ uint64_t
 *Hacl_RSAPSS_new_rsapss_load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb)
 {
   bool ite;
-  if ((uint32_t)1U < modBits && (uint32_t)0U < eBits)
+  if (1U < modBits && 0U < eBits)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    ite =
-      nLen
-      <= (uint32_t)33554431U
-      && eLen <= (uint32_t)67108863U
-      && nLen + nLen <= (uint32_t)0xffffffffU - eLen;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
+    ite = nLen <= 33554431U && eLen <= 67108863U && nLen + nLen <= 0xffffffffU - eLen;
   }
   else
   {
@@ -667,8 +641,8 @@ uint64_t
   {
     return NULL;
   }
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
   uint32_t pkeyLen = nLen + nLen + eLen;
   KRML_CHECK_SIZE(sizeof (uint64_t), pkeyLen);
   uint64_t *pkey = (uint64_t *)KRML_HOST_CALLOC(pkeyLen, sizeof (uint64_t));
@@ -678,24 +652,19 @@ uint64_t
   }
   uint64_t *pkey1 = pkey;
   uint64_t *pkey2 = pkey1;
-  uint32_t nbLen = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t ebLen = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nbLen = (modBits - 1U) / 8U + 1U;
+  uint32_t ebLen = (eBits - 1U) / 8U + 1U;
+  uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
   uint64_t *n = pkey2;
   uint64_t *r2 = pkey2 + nLen1;
   uint64_t *e = pkey2 + nLen1 + nLen1;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen, nb, n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U)
-    / (uint32_t)64U
-    + (uint32_t)1U,
-    modBits - (uint32_t)1U,
-    n,
-    r2);
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen, eb, e);
   uint64_t m0 = check_modulus_u64(modBits, n);
   uint64_t m1 = check_exponent_u64(eBits, e);
   uint64_t m = m0 & m1;
-  bool b = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool b = m == 0xFFFFFFFFFFFFFFFFULL;
   if (b)
   {
     return pkey2;
@@ -727,27 +696,23 @@ uint64_t
 )
 {
   bool ite0;
-  if ((uint32_t)1U < modBits && (uint32_t)0U < eBits)
+  if (1U < modBits && 0U < eBits)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    ite0 =
-      nLen
-      <= (uint32_t)33554431U
-      && eLen <= (uint32_t)67108863U
-      && nLen + nLen <= (uint32_t)0xffffffffU - eLen;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
+    ite0 = nLen <= 33554431U && eLen <= 67108863U && nLen + nLen <= 0xffffffffU - eLen;
   }
   else
   {
     ite0 = false;
   }
   bool ite;
-  if (ite0 && (uint32_t)0U < dBits)
+  if (ite0 && 0U < dBits)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t dLen = (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    ite = dLen <= (uint32_t)67108863U && (uint32_t)2U * nLen <= (uint32_t)0xffffffffU - eLen - dLen;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
+    uint32_t dLen = (dBits - 1U) / 64U + 1U;
+    ite = dLen <= 67108863U && 2U * nLen <= 0xffffffffU - eLen - dLen;
   }
   else
   {
@@ -757,9 +722,9 @@ uint64_t
   {
     return NULL;
   }
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t dLen = (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
+  uint32_t dLen = (dBits - 1U) / 64U + 1U;
   uint32_t skeyLen = nLen + nLen + eLen + dLen;
   KRML_CHECK_SIZE(sizeof (uint64_t), skeyLen);
   uint64_t *skey = (uint64_t *)KRML_HOST_CALLOC(skeyLen, sizeof (uint64_t));
@@ -769,33 +734,28 @@ uint64_t
   }
   uint64_t *skey1 = skey;
   uint64_t *skey2 = skey1;
-  uint32_t dbLen = (dBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen1 = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t dbLen = (dBits - 1U) / 8U + 1U;
+  uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen1 = (eBits - 1U) / 64U + 1U;
   uint32_t pkeyLen = nLen1 + nLen1 + eLen1;
   uint64_t *pkey = skey2;
   uint64_t *d = skey2 + pkeyLen;
-  uint32_t nbLen1 = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t ebLen1 = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nbLen1 = (modBits - 1U) / 8U + 1U;
+  uint32_t ebLen1 = (eBits - 1U) / 8U + 1U;
+  uint32_t nLen2 = (modBits - 1U) / 64U + 1U;
   uint64_t *n = pkey;
   uint64_t *r2 = pkey + nLen2;
   uint64_t *e = pkey + nLen2 + nLen2;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen1, nb, n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U)
-    / (uint32_t)64U
-    + (uint32_t)1U,
-    modBits - (uint32_t)1U,
-    n,
-    r2);
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen1, eb, e);
   uint64_t m0 = check_modulus_u64(modBits, n);
   uint64_t m10 = check_exponent_u64(eBits, e);
   uint64_t m = m0 & m10;
-  bool b = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool b = m == 0xFFFFFFFFFFFFFFFFULL;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(dbLen, db, d);
   uint64_t m1 = check_exponent_u64(dBits, d);
-  bool b0 = b && m1 == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool b0 = b && m1 == 0xFFFFFFFFFFFFFFFFULL;
   if (b0)
   {
     return skey2;
@@ -842,23 +802,17 @@ Hacl_RSAPSS_rsapss_skey_sign(
 )
 {
   KRML_CHECK_SIZE(sizeof (uint64_t),
-    (uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U
-    + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U);
+    2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U + (dBits - 1U) / 64U + 1U);
   uint64_t
   *skey =
-    (uint64_t *)alloca(((uint32_t)2U
-      * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-      + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U
-      + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
+    (uint64_t *)alloca((2U
+      * ((modBits - 1U) / 64U + 1U)
+      + (eBits - 1U) / 64U + 1U
+      + (dBits - 1U) / 64U + 1U)
       * sizeof (uint64_t));
   memset(skey,
     0U,
-    ((uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U
-    + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
+    (2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U + (dBits - 1U) / 64U + 1U)
     * sizeof (uint64_t));
   bool b = load_skey(modBits, eBits, dBits, nb, eb, db, skey);
   if (b)
@@ -911,22 +865,14 @@ Hacl_RSAPSS_rsapss_pkey_verify(
   uint8_t *msg
 )
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t),
-    (uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U);
+  KRML_CHECK_SIZE(sizeof (uint64_t), 2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U);
   uint64_t
   *pkey =
-    (uint64_t *)alloca(((uint32_t)2U
-      * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-      + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
+    (uint64_t *)alloca((2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U)
       * sizeof (uint64_t));
   memset(pkey,
     0U,
-    ((uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    * sizeof (uint64_t));
+    (2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U) * sizeof (uint64_t));
   bool b = load_pkey(modBits, eBits, nb, eb, pkey);
   if (b)
   {
diff --git a/src/msvc/Hacl_SHA2_Vec128.c b/src/msvc/Hacl_SHA2_Vec128.c
index e1b6e304..19b56a5c 100644
--- a/src/msvc/Hacl_SHA2_Vec128.c
+++ b/src/msvc/Hacl_SHA2_Vec128.c
@@ -32,9 +32,9 @@
 static inline void sha224_init4(Lib_IntVector_Intrinsics_vec128 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
     uint32_t hi = Hacl_Impl_SHA2_Generic_h224[i];
     Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_load32(hi);
@@ -46,7 +46,7 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec128));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -55,18 +55,18 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[1U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)16U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)16U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)16U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)16U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)48U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)48U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)48U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)48U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 16U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 16U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 16U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 16U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 48U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 48U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 48U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 48U);
   Lib_IntVector_Intrinsics_vec128 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec128 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec128 v20 = ws[2U];
@@ -196,14 +196,14 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec128 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec128 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec128 b0 = hash[1U];
@@ -218,10 +218,10 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       t1 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(h02,
                 Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_lognot(e0), g0))),
             k_e_t),
@@ -229,10 +229,10 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       Lib_IntVector_Intrinsics_vec128
       t2 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, b0),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, c0),
               Lib_IntVector_Intrinsics_vec128_and(b0, c0))));
@@ -252,30 +252,30 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec128 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec128
         s1 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec128
         s0 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(s1,
                 t7),
@@ -283,9 +283,9 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
     Lib_IntVector_Intrinsics_vec128
     x = Lib_IntVector_Intrinsics_vec128_add32(hash[i], hash_old[i]);
@@ -299,17 +299,17 @@ sha224_update_nblocks4(
   Lib_IntVector_Intrinsics_vec128 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
     Hacl_Impl_SHA2_Types_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha224_update4(mb, st);
@@ -325,53 +325,53 @@ sha224_update_last4(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[512U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)64U;
+  uint8_t *last11 = last3 + 64U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
   Hacl_Impl_SHA2_Types_uint8_4p
@@ -382,7 +382,7 @@ sha224_update_last4(
   Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
   Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
   sha224_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha224_update4(last1, hash);
     return;
@@ -458,18 +458,18 @@ sha224_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * (uint32_t)16U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * 16U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)28U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 28U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 28U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 28U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 28U * sizeof (uint8_t));
 }
 
 void
@@ -491,10 +491,10 @@ Hacl_SHA2_Vec128_sha224_4(
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U };
   sha224_init4(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha224_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
@@ -512,9 +512,9 @@ Hacl_SHA2_Vec128_sha224_4(
 static inline void sha256_init4(Lib_IntVector_Intrinsics_vec128 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
     uint32_t hi = Hacl_Impl_SHA2_Generic_h256[i];
     Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_load32(hi);
@@ -526,7 +526,7 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec128));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -535,18 +535,18 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[1U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)16U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)16U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)16U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)16U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)48U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)48U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)48U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)48U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 16U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 16U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 16U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 16U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 48U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 48U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 48U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 48U);
   Lib_IntVector_Intrinsics_vec128 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec128 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec128 v20 = ws[2U];
@@ -676,14 +676,14 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec128 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec128 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec128 b0 = hash[1U];
@@ -698,10 +698,10 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       t1 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(h02,
                 Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_lognot(e0), g0))),
             k_e_t),
@@ -709,10 +709,10 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       Lib_IntVector_Intrinsics_vec128
       t2 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, b0),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, c0),
               Lib_IntVector_Intrinsics_vec128_and(b0, c0))));
@@ -732,30 +732,30 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec128 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec128
         s1 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec128
         s0 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(s1,
                 t7),
@@ -763,9 +763,9 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
     Lib_IntVector_Intrinsics_vec128
     x = Lib_IntVector_Intrinsics_vec128_add32(hash[i], hash_old[i]);
@@ -779,17 +779,17 @@ sha256_update_nblocks4(
   Lib_IntVector_Intrinsics_vec128 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
     Hacl_Impl_SHA2_Types_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha256_update4(mb, st);
@@ -805,53 +805,53 @@ sha256_update_last4(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[512U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)64U;
+  uint8_t *last11 = last3 + 64U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
   Hacl_Impl_SHA2_Types_uint8_4p
@@ -862,7 +862,7 @@ sha256_update_last4(
   Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
   Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
   sha256_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha256_update4(last1, hash);
     return;
@@ -938,18 +938,18 @@ sha256_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * (uint32_t)16U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * 16U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 32U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 32U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 32U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 32U * sizeof (uint8_t));
 }
 
 void
@@ -971,10 +971,10 @@ Hacl_SHA2_Vec128_sha256_4(
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U };
   sha256_init4(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha256_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
diff --git a/src/msvc/Hacl_SHA2_Vec256.c b/src/msvc/Hacl_SHA2_Vec256.c
index b74ce621..37d903ad 100644
--- a/src/msvc/Hacl_SHA2_Vec256.c
+++ b/src/msvc/Hacl_SHA2_Vec256.c
@@ -33,9 +33,9 @@
 static inline void sha224_init8(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     uint32_t hi = Hacl_Impl_SHA2_Generic_h224[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load32(hi);
@@ -47,7 +47,7 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = b.snd.snd.snd.snd.snd.fst;
@@ -64,14 +64,14 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[5U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5);
   ws[6U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6);
   ws[7U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + (uint32_t)32U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + (uint32_t)32U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + (uint32_t)32U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + (uint32_t)32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + 32U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + 32U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + 32U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + 32U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -281,14 +281,14 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -303,10 +303,10 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -314,10 +314,10 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -337,30 +337,30 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(s1,
                 t7),
@@ -368,9 +368,9 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add32(hash[i], hash_old[i]);
@@ -384,8 +384,8 @@ sha224_update_nblocks8(
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
     uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -395,14 +395,14 @@ sha224_update_nblocks8(
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
-    uint8_t *bl4 = b4 + i * (uint32_t)64U;
-    uint8_t *bl5 = b5 + i * (uint32_t)64U;
-    uint8_t *bl6 = b6 + i * (uint32_t)64U;
-    uint8_t *bl7 = b7 + i * (uint32_t)64U;
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
+    uint8_t *bl4 = b4 + i * 64U;
+    uint8_t *bl5 = b5 + i * 64U;
+    uint8_t *bl6 = b6 + i * 64U;
+    uint8_t *bl7 = b7 + i * 64U;
     Hacl_Impl_SHA2_Types_uint8_8p
     mb =
       {
@@ -431,18 +431,18 @@ sha224_update_last8(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -453,67 +453,67 @@ sha224_update_last8(
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
-  uint8_t *last4 = last + (uint32_t)512U;
-  uint8_t *last5 = last + (uint32_t)640U;
-  uint8_t *last6 = last + (uint32_t)768U;
-  uint8_t *last7 = last + (uint32_t)896U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
+  uint8_t *last4 = last + 512U;
+  uint8_t *last5 = last + 640U;
+  uint8_t *last6 = last + 768U;
+  uint8_t *last7 = last + 896U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last013 = last3;
-  uint8_t *last113 = last3 + (uint32_t)64U;
+  uint8_t *last113 = last3 + 64U;
   uint8_t *l30 = last013;
   uint8_t *l31 = last113;
   memcpy(last4, b4, len * sizeof (uint8_t));
-  last4[len] = (uint8_t)0x80U;
-  memcpy(last4 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last4[len] = 0x80U;
+  memcpy(last4 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last014 = last4;
-  uint8_t *last114 = last4 + (uint32_t)64U;
+  uint8_t *last114 = last4 + 64U;
   uint8_t *l40 = last014;
   uint8_t *l41 = last114;
   memcpy(last5, b5, len * sizeof (uint8_t));
-  last5[len] = (uint8_t)0x80U;
-  memcpy(last5 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last5[len] = 0x80U;
+  memcpy(last5 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last015 = last5;
-  uint8_t *last115 = last5 + (uint32_t)64U;
+  uint8_t *last115 = last5 + 64U;
   uint8_t *l50 = last015;
   uint8_t *l51 = last115;
   memcpy(last6, b6, len * sizeof (uint8_t));
-  last6[len] = (uint8_t)0x80U;
-  memcpy(last6 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last6[len] = 0x80U;
+  memcpy(last6 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last016 = last6;
-  uint8_t *last116 = last6 + (uint32_t)64U;
+  uint8_t *last116 = last6 + 64U;
   uint8_t *l60 = last016;
   uint8_t *l61 = last116;
   memcpy(last7, b7, len * sizeof (uint8_t));
-  last7[len] = (uint8_t)0x80U;
-  memcpy(last7 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last7[len] = 0x80U;
+  memcpy(last7 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last7;
-  uint8_t *last11 = last7 + (uint32_t)64U;
+  uint8_t *last11 = last7 + 64U;
   uint8_t *l70 = last01;
   uint8_t *l71 = last11;
   Hacl_Impl_SHA2_Types_uint8_8p
@@ -550,7 +550,7 @@ sha224_update_last8(
   Hacl_Impl_SHA2_Types_uint8_8p last0 = scrut.fst;
   Hacl_Impl_SHA2_Types_uint8_8p last1 = scrut.snd;
   sha224_update8(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha224_update8(last1, hash);
     return;
@@ -662,10 +662,10 @@ sha224_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   st[6U] = st6_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * 32U, st[i]););
   uint8_t *b7 = h.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = h.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = h.snd.snd.snd.snd.snd.fst;
@@ -674,14 +674,14 @@ sha224_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b4, hbuf + (uint32_t)128U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b5, hbuf + (uint32_t)160U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b6, hbuf + (uint32_t)192U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b7, hbuf + (uint32_t)224U, (uint32_t)28U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 28U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 28U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 28U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 28U * sizeof (uint8_t));
+  memcpy(b4, hbuf + 128U, 28U * sizeof (uint8_t));
+  memcpy(b5, hbuf + 160U, 28U * sizeof (uint8_t));
+  memcpy(b6, hbuf + 192U, 28U * sizeof (uint8_t));
+  memcpy(b7, hbuf + 224U, 28U * sizeof (uint8_t));
 }
 
 void
@@ -740,10 +740,10 @@ Hacl_SHA2_Vec256_sha224_8(
     };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha224_init8(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha224_update_nblocks8(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b7 = ib.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = ib.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = ib.snd.snd.snd.snd.snd.fst;
@@ -782,9 +782,9 @@ Hacl_SHA2_Vec256_sha224_8(
 static inline void sha256_init8(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     uint32_t hi = Hacl_Impl_SHA2_Generic_h256[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load32(hi);
@@ -796,7 +796,7 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = b.snd.snd.snd.snd.snd.fst;
@@ -813,14 +813,14 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[5U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5);
   ws[6U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6);
   ws[7U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + (uint32_t)32U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + (uint32_t)32U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + (uint32_t)32U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + (uint32_t)32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + 32U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + 32U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + 32U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + 32U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -1030,14 +1030,14 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -1052,10 +1052,10 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -1063,10 +1063,10 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -1086,30 +1086,30 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(s1,
                 t7),
@@ -1117,9 +1117,9 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add32(hash[i], hash_old[i]);
@@ -1133,8 +1133,8 @@ sha256_update_nblocks8(
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
     uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -1144,14 +1144,14 @@ sha256_update_nblocks8(
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
-    uint8_t *bl4 = b4 + i * (uint32_t)64U;
-    uint8_t *bl5 = b5 + i * (uint32_t)64U;
-    uint8_t *bl6 = b6 + i * (uint32_t)64U;
-    uint8_t *bl7 = b7 + i * (uint32_t)64U;
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
+    uint8_t *bl4 = b4 + i * 64U;
+    uint8_t *bl5 = b5 + i * 64U;
+    uint8_t *bl6 = b6 + i * 64U;
+    uint8_t *bl7 = b7 + i * 64U;
     Hacl_Impl_SHA2_Types_uint8_8p
     mb =
       {
@@ -1180,18 +1180,18 @@ sha256_update_last8(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -1202,67 +1202,67 @@ sha256_update_last8(
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
-  uint8_t *last4 = last + (uint32_t)512U;
-  uint8_t *last5 = last + (uint32_t)640U;
-  uint8_t *last6 = last + (uint32_t)768U;
-  uint8_t *last7 = last + (uint32_t)896U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
+  uint8_t *last4 = last + 512U;
+  uint8_t *last5 = last + 640U;
+  uint8_t *last6 = last + 768U;
+  uint8_t *last7 = last + 896U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last013 = last3;
-  uint8_t *last113 = last3 + (uint32_t)64U;
+  uint8_t *last113 = last3 + 64U;
   uint8_t *l30 = last013;
   uint8_t *l31 = last113;
   memcpy(last4, b4, len * sizeof (uint8_t));
-  last4[len] = (uint8_t)0x80U;
-  memcpy(last4 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last4[len] = 0x80U;
+  memcpy(last4 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last014 = last4;
-  uint8_t *last114 = last4 + (uint32_t)64U;
+  uint8_t *last114 = last4 + 64U;
   uint8_t *l40 = last014;
   uint8_t *l41 = last114;
   memcpy(last5, b5, len * sizeof (uint8_t));
-  last5[len] = (uint8_t)0x80U;
-  memcpy(last5 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last5[len] = 0x80U;
+  memcpy(last5 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last015 = last5;
-  uint8_t *last115 = last5 + (uint32_t)64U;
+  uint8_t *last115 = last5 + 64U;
   uint8_t *l50 = last015;
   uint8_t *l51 = last115;
   memcpy(last6, b6, len * sizeof (uint8_t));
-  last6[len] = (uint8_t)0x80U;
-  memcpy(last6 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last6[len] = 0x80U;
+  memcpy(last6 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last016 = last6;
-  uint8_t *last116 = last6 + (uint32_t)64U;
+  uint8_t *last116 = last6 + 64U;
   uint8_t *l60 = last016;
   uint8_t *l61 = last116;
   memcpy(last7, b7, len * sizeof (uint8_t));
-  last7[len] = (uint8_t)0x80U;
-  memcpy(last7 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last7[len] = 0x80U;
+  memcpy(last7 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last7;
-  uint8_t *last11 = last7 + (uint32_t)64U;
+  uint8_t *last11 = last7 + 64U;
   uint8_t *l70 = last01;
   uint8_t *l71 = last11;
   Hacl_Impl_SHA2_Types_uint8_8p
@@ -1299,7 +1299,7 @@ sha256_update_last8(
   Hacl_Impl_SHA2_Types_uint8_8p last0 = scrut.fst;
   Hacl_Impl_SHA2_Types_uint8_8p last1 = scrut.snd;
   sha256_update8(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha256_update8(last1, hash);
     return;
@@ -1411,10 +1411,10 @@ sha256_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   st[6U] = st6_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * 32U, st[i]););
   uint8_t *b7 = h.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = h.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = h.snd.snd.snd.snd.snd.fst;
@@ -1423,14 +1423,14 @@ sha256_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b4, hbuf + (uint32_t)128U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b5, hbuf + (uint32_t)160U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b6, hbuf + (uint32_t)192U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b7, hbuf + (uint32_t)224U, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 32U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 32U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 32U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 32U * sizeof (uint8_t));
+  memcpy(b4, hbuf + 128U, 32U * sizeof (uint8_t));
+  memcpy(b5, hbuf + 160U, 32U * sizeof (uint8_t));
+  memcpy(b6, hbuf + 192U, 32U * sizeof (uint8_t));
+  memcpy(b7, hbuf + 224U, 32U * sizeof (uint8_t));
 }
 
 void
@@ -1489,10 +1489,10 @@ Hacl_SHA2_Vec256_sha256_8(
     };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha256_init8(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha256_update_nblocks8(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b7 = ib.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = ib.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = ib.snd.snd.snd.snd.snd.fst;
@@ -1531,9 +1531,9 @@ Hacl_SHA2_Vec256_sha256_8(
 static inline void sha384_init4(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     uint64_t hi = Hacl_Impl_SHA2_Generic_h384[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load64(hi);
@@ -1545,7 +1545,7 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -1554,18 +1554,18 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)32U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)32U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)32U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)32U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)64U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)64U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)64U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)64U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)96U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)96U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)96U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)96U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 32U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 32U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 32U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 64U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 64U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 64U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 64U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 96U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 96U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 96U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 96U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -1679,14 +1679,14 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR5(i0,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
+    0U,
+    5U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -1701,10 +1701,10 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                    (uint32_t)14U),
+                    14U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                      (uint32_t)18U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, (uint32_t)41U)))),
+                      18U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, 41U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -1712,10 +1712,10 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-              (uint32_t)28U),
+              28U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-                (uint32_t)34U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, (uint32_t)39U))),
+                34U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, 39U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -1735,30 +1735,30 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)4U)
+    if (i0 < 4U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-              (uint32_t)19U),
+              19U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-                (uint32_t)61U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, (uint32_t)6U)));
+                61U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, 6U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-              (uint32_t)1U),
+              1U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-                (uint32_t)8U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, (uint32_t)7U)));
+                8U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, 7U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(s1,
                 t7),
@@ -1766,9 +1766,9 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add64(hash[i], hash_old[i]);
@@ -1782,17 +1782,17 @@ sha384_update_nblocks4(
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)128U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 128U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)128U;
-    uint8_t *bl1 = b1 + i * (uint32_t)128U;
-    uint8_t *bl2 = b2 + i * (uint32_t)128U;
-    uint8_t *bl3 = b3 + i * (uint32_t)128U;
+    uint8_t *bl0 = b0 + i * 128U;
+    uint8_t *bl1 = b1 + i * 128U;
+    uint8_t *bl2 = b2 + i * 128U;
+    uint8_t *bl3 = b3 + i * 128U;
     Hacl_Impl_SHA2_Types_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha384_update4(mb, st);
@@ -1808,53 +1808,53 @@ sha384_update_last4(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+  if (len + 16U + 1U <= 128U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)128U;
+  uint32_t fin = blocks * 128U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[16U] = { 0U };
-  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
   store128_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)256U;
-  uint8_t *last2 = last + (uint32_t)512U;
-  uint8_t *last3 = last + (uint32_t)768U;
+  uint8_t *last10 = last + 256U;
+  uint8_t *last2 = last + 512U;
+  uint8_t *last3 = last + 768U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)128U;
+  uint8_t *last110 = last00 + 128U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)128U;
+  uint8_t *last111 = last10 + 128U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)128U;
+  uint8_t *last112 = last2 + 128U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)128U;
+  uint8_t *last11 = last3 + 128U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
   Hacl_Impl_SHA2_Types_uint8_4p
@@ -1865,7 +1865,7 @@ sha384_update_last4(
   Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
   Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
   sha384_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha384_update4(last1, hash);
     return;
@@ -1933,18 +1933,18 @@ sha384_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * 32U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)48U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)64U, (uint32_t)48U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)128U, (uint32_t)48U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)192U, (uint32_t)48U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 48U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 64U, 48U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 128U, 48U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 192U, 48U * sizeof (uint8_t));
 }
 
 void
@@ -1966,10 +1966,10 @@ Hacl_SHA2_Vec256_sha384_4(
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha384_init4(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
   sha384_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
@@ -1987,9 +1987,9 @@ Hacl_SHA2_Vec256_sha384_4(
 static inline void sha512_init4(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     uint64_t hi = Hacl_Impl_SHA2_Generic_h512[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load64(hi);
@@ -2001,7 +2001,7 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -2010,18 +2010,18 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)32U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)32U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)32U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)32U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)64U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)64U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)64U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)64U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)96U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)96U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)96U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)96U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 32U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 32U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 32U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 64U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 64U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 64U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 64U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 96U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 96U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 96U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 96U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -2135,14 +2135,14 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR5(i0,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
+    0U,
+    5U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -2157,10 +2157,10 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                    (uint32_t)14U),
+                    14U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                      (uint32_t)18U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, (uint32_t)41U)))),
+                      18U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, 41U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -2168,10 +2168,10 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-              (uint32_t)28U),
+              28U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-                (uint32_t)34U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, (uint32_t)39U))),
+                34U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, 39U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -2191,30 +2191,30 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)4U)
+    if (i0 < 4U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-              (uint32_t)19U),
+              19U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-                (uint32_t)61U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, (uint32_t)6U)));
+                61U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, 6U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-              (uint32_t)1U),
+              1U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-                (uint32_t)8U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, (uint32_t)7U)));
+                8U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, 7U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(s1,
                 t7),
@@ -2222,9 +2222,9 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add64(hash[i], hash_old[i]);
@@ -2238,17 +2238,17 @@ sha512_update_nblocks4(
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)128U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 128U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)128U;
-    uint8_t *bl1 = b1 + i * (uint32_t)128U;
-    uint8_t *bl2 = b2 + i * (uint32_t)128U;
-    uint8_t *bl3 = b3 + i * (uint32_t)128U;
+    uint8_t *bl0 = b0 + i * 128U;
+    uint8_t *bl1 = b1 + i * 128U;
+    uint8_t *bl2 = b2 + i * 128U;
+    uint8_t *bl3 = b3 + i * 128U;
     Hacl_Impl_SHA2_Types_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha512_update4(mb, st);
@@ -2264,53 +2264,53 @@ sha512_update_last4(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+  if (len + 16U + 1U <= 128U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)128U;
+  uint32_t fin = blocks * 128U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[16U] = { 0U };
-  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
   store128_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)256U;
-  uint8_t *last2 = last + (uint32_t)512U;
-  uint8_t *last3 = last + (uint32_t)768U;
+  uint8_t *last10 = last + 256U;
+  uint8_t *last2 = last + 512U;
+  uint8_t *last3 = last + 768U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)128U;
+  uint8_t *last110 = last00 + 128U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)128U;
+  uint8_t *last111 = last10 + 128U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)128U;
+  uint8_t *last112 = last2 + 128U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)128U;
+  uint8_t *last11 = last3 + 128U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
   Hacl_Impl_SHA2_Types_uint8_4p
@@ -2321,7 +2321,7 @@ sha512_update_last4(
   Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
   Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
   sha512_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha512_update4(last1, hash);
     return;
@@ -2389,18 +2389,18 @@ sha512_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * 32U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)64U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)64U, (uint32_t)64U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)128U, (uint32_t)64U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)192U, (uint32_t)64U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 64U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 64U, 64U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 128U, 64U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 192U, 64U * sizeof (uint8_t));
 }
 
 void
@@ -2422,10 +2422,10 @@ Hacl_SHA2_Vec256_sha512_4(
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha512_init4(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
   sha512_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
diff --git a/src/msvc/Hacl_Salsa20.c b/src/msvc/Hacl_Salsa20.c
index 2758f8a4..151df07d 100644
--- a/src/msvc/Hacl_Salsa20.c
+++ b/src/msvc/Hacl_Salsa20.c
@@ -30,35 +30,35 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t sta = st[b];
   uint32_t stb0 = st[a];
   uint32_t std0 = st[d];
-  uint32_t sta1 = sta ^ ((stb0 + std0) << (uint32_t)7U | (stb0 + std0) >> (uint32_t)25U);
+  uint32_t sta1 = sta ^ ((stb0 + std0) << 7U | (stb0 + std0) >> 25U);
   st[b] = sta1;
   uint32_t sta0 = st[c];
   uint32_t stb1 = st[b];
   uint32_t std1 = st[a];
-  uint32_t sta10 = sta0 ^ ((stb1 + std1) << (uint32_t)9U | (stb1 + std1) >> (uint32_t)23U);
+  uint32_t sta10 = sta0 ^ ((stb1 + std1) << 9U | (stb1 + std1) >> 23U);
   st[c] = sta10;
   uint32_t sta2 = st[d];
   uint32_t stb2 = st[c];
   uint32_t std2 = st[b];
-  uint32_t sta11 = sta2 ^ ((stb2 + std2) << (uint32_t)13U | (stb2 + std2) >> (uint32_t)19U);
+  uint32_t sta11 = sta2 ^ ((stb2 + std2) << 13U | (stb2 + std2) >> 19U);
   st[d] = sta11;
   uint32_t sta3 = st[a];
   uint32_t stb = st[d];
   uint32_t std = st[c];
-  uint32_t sta12 = sta3 ^ ((stb + std) << (uint32_t)18U | (stb + std) >> (uint32_t)14U);
+  uint32_t sta12 = sta3 ^ ((stb + std) << 18U | (stb + std) >> 14U);
   st[a] = sta12;
 }
 
 static inline void double_round(uint32_t *st)
 {
-  quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U);
-  quarter_round(st, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U, (uint32_t)1U);
-  quarter_round(st, (uint32_t)10U, (uint32_t)14U, (uint32_t)2U, (uint32_t)6U);
-  quarter_round(st, (uint32_t)15U, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U);
-  quarter_round(st, (uint32_t)0U, (uint32_t)1U, (uint32_t)2U, (uint32_t)3U);
-  quarter_round(st, (uint32_t)5U, (uint32_t)6U, (uint32_t)7U, (uint32_t)4U);
-  quarter_round(st, (uint32_t)10U, (uint32_t)11U, (uint32_t)8U, (uint32_t)9U);
-  quarter_round(st, (uint32_t)15U, (uint32_t)12U, (uint32_t)13U, (uint32_t)14U);
+  quarter_round(st, 0U, 4U, 8U, 12U);
+  quarter_round(st, 5U, 9U, 13U, 1U);
+  quarter_round(st, 10U, 14U, 2U, 6U);
+  quarter_round(st, 15U, 3U, 7U, 11U);
+  quarter_round(st, 0U, 1U, 2U, 3U);
+  quarter_round(st, 5U, 6U, 7U, 4U);
+  quarter_round(st, 10U, 11U, 8U, 9U);
+  quarter_round(st, 15U, 12U, 13U, 14U);
 }
 
 static inline void rounds(uint32_t *st)
@@ -77,14 +77,14 @@ static inline void rounds(uint32_t *st)
 
 static inline void salsa20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t));
+  memcpy(k, ctx, 16U * sizeof (uint32_t));
   uint32_t ctr_u32 = ctr;
   k[8U] = k[8U] + ctr_u32;
   rounds(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = k;
     uint32_t x = k[i] + ctx[i];
     os[i] = x;);
@@ -98,42 +98,38 @@ static inline void salsa20_key_block0(uint8_t *out, uint8_t *key, uint8_t *n)
   uint32_t k32[8U] = { 0U };
   uint32_t n32[2U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
-  ctx[0U] = (uint32_t)0x61707865U;
+  ctx[0U] = 0x61707865U;
   uint32_t *k0 = k32;
-  uint32_t *k1 = k32 + (uint32_t)4U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t));
-  ctx[8U] = (uint32_t)0U;
-  ctx[9U] = (uint32_t)0U;
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k1, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
-  salsa20_core(k, ctx, (uint32_t)0U);
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    store32_le(out + i * (uint32_t)4U, k[i]););
+  uint32_t *k1 = k32 + 4U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 2U * sizeof (uint32_t));
+  ctx[8U] = 0U;
+  ctx[9U] = 0U;
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k1, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
+  salsa20_core(k, ctx, 0U);
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(out + i * 4U, k[i]););
 }
 
 static inline void
@@ -150,101 +146,93 @@ salsa20_encrypt(
   uint32_t k32[8U] = { 0U };
   uint32_t n32[2U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
-  ctx[0U] = (uint32_t)0x61707865U;
+  ctx[0U] = 0x61707865U;
   uint32_t *k0 = k32;
-  uint32_t *k10 = k32 + (uint32_t)4U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t));
+  uint32_t *k10 = k32 + 4U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 2U * sizeof (uint32_t));
   ctx[8U] = ctr;
-  ctx[9U] = (uint32_t)0U;
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
+  ctx[9U] = 0U;
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k10, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
   uint32_t k[16U] = { 0U };
-  KRML_HOST_IGNORE(k);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  KRML_MAYBE_UNUSED_VAR(k);
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = text + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = text + i0 * 64U;
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, i0);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = uu____1 + i * (uint32_t)4U;
+      uint8_t *bj = uu____1 + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(uu____0 + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(uu____0 + i * 4U, bl[i]););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, nb);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = plain + i * (uint32_t)4U;
+      uint8_t *bj = plain + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(plain + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(plain + i * 4U, bl[i]););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -263,101 +251,93 @@ salsa20_decrypt(
   uint32_t k32[8U] = { 0U };
   uint32_t n32[2U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
-  ctx[0U] = (uint32_t)0x61707865U;
+  ctx[0U] = 0x61707865U;
   uint32_t *k0 = k32;
-  uint32_t *k10 = k32 + (uint32_t)4U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t));
+  uint32_t *k10 = k32 + 4U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 2U * sizeof (uint32_t));
   ctx[8U] = ctr;
-  ctx[9U] = (uint32_t)0U;
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
+  ctx[9U] = 0U;
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k10, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
   uint32_t k[16U] = { 0U };
-  KRML_HOST_IGNORE(k);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  KRML_MAYBE_UNUSED_VAR(k);
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = cipher + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = cipher + i0 * 64U;
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, i0);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = uu____1 + i * (uint32_t)4U;
+      uint8_t *bj = uu____1 + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(uu____0 + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(uu____0 + i * 4U, bl[i]););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, nb);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = plain + i * (uint32_t)4U;
+      uint8_t *bj = plain + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(plain + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(plain + i * 4U, bl[i]););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -368,34 +348,34 @@ static inline void hsalsa20(uint8_t *out, uint8_t *key, uint8_t *n)
   uint32_t k32[8U] = { 0U };
   uint32_t n32[4U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   uint32_t *k0 = k32;
-  uint32_t *k1 = k32 + (uint32_t)4U;
-  ctx[0U] = (uint32_t)0x61707865U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)4U * sizeof (uint32_t));
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k1, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
+  uint32_t *k1 = k32 + 4U;
+  ctx[0U] = 0x61707865U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 4U * sizeof (uint32_t));
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k1, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
   rounds(ctx);
   uint32_t r0 = ctx[0U];
   uint32_t r1 = ctx[5U];
@@ -406,11 +386,7 @@ static inline void hsalsa20(uint8_t *out, uint8_t *key, uint8_t *n)
   uint32_t r6 = ctx[8U];
   uint32_t r7 = ctx[9U];
   uint32_t res[8U] = { r0, r1, r2, r3, r4, r5, r6, r7 };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_le(out + i * (uint32_t)4U, res[i]););
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_le(out + i * 4U, res[i]););
 }
 
 void
diff --git a/src/msvc/Hacl_Streaming_Blake2.c b/src/msvc/Hacl_Streaming_Blake2.c
index 948d56c2..ae1f3181 100644
--- a/src/msvc/Hacl_Streaming_Blake2.c
+++ b/src/msvc/Hacl_Streaming_Blake2.c
@@ -30,19 +30,19 @@
 */
 Hacl_Streaming_Blake2_blake2s_32_state *Hacl_Streaming_Blake2_blake2s_32_no_key_create_in(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
-  uint32_t *b = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
+  uint32_t *b = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
   Hacl_Streaming_Blake2_blake2s_32_block_state block_state = { .fst = wv, .snd = b };
   Hacl_Streaming_Blake2_blake2s_32_state
-  s1 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s1 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_Blake2_blake2s_32_state
   *p =
     (Hacl_Streaming_Blake2_blake2s_32_state *)KRML_HOST_MALLOC(sizeof (
         Hacl_Streaming_Blake2_blake2s_32_state
       ));
   p[0U] = s1;
-  Hacl_Blake2s_32_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_32_blake2s_init(block_state.snd, 0U, 32U);
   return p;
 }
 
@@ -54,9 +54,9 @@ void Hacl_Streaming_Blake2_blake2s_32_no_key_init(Hacl_Streaming_Blake2_blake2s_
   Hacl_Streaming_Blake2_blake2s_32_state scrut = *s1;
   uint8_t *buf = scrut.buf;
   Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state;
-  Hacl_Blake2s_32_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_32_blake2s_init(block_state.snd, 0U, 32U);
   Hacl_Streaming_Blake2_blake2s_32_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s1[0U] = tmp;
 }
 
@@ -72,33 +72,33 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_update(
 {
   Hacl_Streaming_Blake2_blake2s_32_state s1 = *p;
   uint64_t total_len = s1.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (len <= 64U - sz)
   {
     Hacl_Streaming_Blake2_blake2s_32_state s2 = *p;
     Hacl_Streaming_Blake2_blake2s_32_block_state block_state1 = s2.block_state;
     uint8_t *buf = s2.buf;
     uint64_t total_len1 = s2.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -113,46 +113,46 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Blake2_blake2s_32_state s2 = *p;
     Hacl_Streaming_Blake2_blake2s_32_block_state block_state1 = s2.block_state;
     uint8_t *buf = s2.buf;
     uint64_t total_len1 = s2.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       uint32_t *wv = block_state1.fst;
       uint32_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
+      uint32_t nb = 1U;
+      Hacl_Blake2s_32_blake2s_update_multi(64U, wv, hash, prevlen, buf, nb);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)64U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
     uint32_t *wv = block_state1.fst;
     uint32_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
+    uint32_t nb = data1_len / 64U;
     Hacl_Blake2s_32_blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
@@ -168,7 +168,7 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
+    uint32_t diff = 64U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Blake2_blake2s_32_state s2 = *p;
@@ -176,13 +176,13 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_update(
     uint8_t *buf0 = s2.buf;
     uint64_t total_len10 = s2.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -201,45 +201,39 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_update(
     uint8_t *buf = s20.buf;
     uint64_t total_len1 = s20.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       uint32_t *wv = block_state1.fst;
       uint32_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
+      uint32_t nb = 1U;
+      Hacl_Blake2s_32_blake2s_update_multi(64U, wv, hash, prevlen, buf, nb);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
     uint32_t *wv = block_state1.fst;
     uint32_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
+    uint32_t nb = data1_len / 64U;
     Hacl_Blake2s_32_blake2s_update_multi(data1_len, wv, hash, total_len1, data11, nb);
     uint8_t *dst = buf;
     memcpy(dst, data21, data2_len * sizeof (uint8_t));
@@ -270,13 +264,13 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_finish(
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t wv0[16U] = { 0U };
@@ -284,28 +278,28 @@ Hacl_Streaming_Blake2_blake2s_32_no_key_finish(
   Hacl_Streaming_Blake2_blake2s_32_block_state tmp_block_state = { .fst = wv0, .snd = b };
   uint32_t *src_b = block_state.snd;
   uint32_t *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)16U * sizeof (uint32_t));
+  memcpy(dst_b, src_b, 16U * sizeof (uint32_t));
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
   uint32_t *wv1 = tmp_block_state.fst;
   uint32_t *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2s_32_blake2s_update_multi((uint32_t)0U, wv1, hash0, prev_len, buf_multi, nb);
+  uint32_t nb = 0U;
+  Hacl_Blake2s_32_blake2s_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   uint32_t *wv = tmp_block_state.fst;
   uint32_t *hash = tmp_block_state.snd;
   Hacl_Blake2s_32_blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last);
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, tmp_block_state.snd);
+  Hacl_Blake2s_32_blake2s_finish(32U, dst, tmp_block_state.snd);
 }
 
 /**
@@ -329,19 +323,19 @@ void Hacl_Streaming_Blake2_blake2s_32_no_key_free(Hacl_Streaming_Blake2_blake2s_
 */
 Hacl_Streaming_Blake2_blake2b_32_state *Hacl_Streaming_Blake2_blake2b_32_no_key_create_in(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
-  uint64_t *b = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
+  uint64_t *b = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
   Hacl_Streaming_Blake2_blake2b_32_block_state block_state = { .fst = wv, .snd = b };
   Hacl_Streaming_Blake2_blake2b_32_state
-  s1 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s1 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_Blake2_blake2b_32_state
   *p =
     (Hacl_Streaming_Blake2_blake2b_32_state *)KRML_HOST_MALLOC(sizeof (
         Hacl_Streaming_Blake2_blake2b_32_state
       ));
   p[0U] = s1;
-  Hacl_Blake2b_32_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_32_blake2b_init(block_state.snd, 0U, 64U);
   return p;
 }
 
@@ -353,9 +347,9 @@ void Hacl_Streaming_Blake2_blake2b_32_no_key_init(Hacl_Streaming_Blake2_blake2b_
   Hacl_Streaming_Blake2_blake2b_32_state scrut = *s1;
   uint8_t *buf = scrut.buf;
   Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state;
-  Hacl_Blake2b_32_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_32_blake2b_init(block_state.snd, 0U, 64U);
   Hacl_Streaming_Blake2_blake2b_32_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s1[0U] = tmp;
 }
 
@@ -371,33 +365,33 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
 {
   Hacl_Streaming_Blake2_blake2b_32_state s1 = *p;
   uint64_t total_len = s1.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)128U;
+    sz = 128U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    sz = (uint32_t)(total_len % (uint64_t)128U);
   }
-  if (len <= (uint32_t)128U - sz)
+  if (len <= 128U - sz)
   {
     Hacl_Streaming_Blake2_blake2b_32_state s2 = *p;
     Hacl_Streaming_Blake2_blake2b_32_block_state block_state1 = s2.block_state;
     uint8_t *buf = s2.buf;
     uint64_t total_len1 = s2.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -412,28 +406,28 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Blake2_blake2b_32_state s2 = *p;
     Hacl_Streaming_Blake2_blake2b_32_block_state block_state1 = s2.block_state;
     uint8_t *buf = s2.buf;
     uint64_t total_len1 = s2.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       uint64_t *wv = block_state1.fst;
       uint64_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
+      uint32_t nb = 1U;
+      Hacl_Blake2b_32_blake2b_update_multi(128U,
         wv,
         hash,
         FStar_UInt128_uint64_to_uint128(prevlen),
@@ -441,22 +435,22 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
         nb);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)128U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
+    uint32_t n_blocks = (len - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
     uint64_t *wv = block_state1.fst;
     uint64_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
+    uint32_t nb = data1_len / 128U;
     Hacl_Blake2b_32_blake2b_update_multi(data1_len,
       wv,
       hash,
@@ -477,7 +471,7 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)128U - sz;
+    uint32_t diff = 128U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Blake2_blake2b_32_state s2 = *p;
@@ -485,13 +479,13 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
     uint8_t *buf0 = s2.buf;
     uint64_t total_len10 = s2.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)128U;
+      sz10 = 128U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -510,21 +504,21 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
     uint8_t *buf = s20.buf;
     uint64_t total_len1 = s20.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       uint64_t *wv = block_state1.fst;
       uint64_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
+      uint32_t nb = 1U;
+      Hacl_Blake2b_32_blake2b_update_multi(128U,
         wv,
         hash,
         FStar_UInt128_uint64_to_uint128(prevlen),
@@ -532,28 +526,22 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_update(
         nb);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)128U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
+    uint32_t n_blocks = (len - diff - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
     uint64_t *wv = block_state1.fst;
     uint64_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
+    uint32_t nb = data1_len / 128U;
     Hacl_Blake2b_32_blake2b_update_multi(data1_len,
       wv,
       hash,
@@ -589,13 +577,13 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_finish(
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)128U;
+    r = 128U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    r = (uint32_t)(total_len % (uint64_t)128U);
   }
   uint8_t *buf_1 = buf_;
   uint64_t wv0[16U] = { 0U };
@@ -603,23 +591,23 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_finish(
   Hacl_Streaming_Blake2_blake2b_32_block_state tmp_block_state = { .fst = wv0, .snd = b };
   uint64_t *src_b = block_state.snd;
   uint64_t *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)16U * sizeof (uint64_t));
+  memcpy(dst_b, src_b, 16U * sizeof (uint64_t));
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 128U == 0U && r > 0U)
   {
-    ite = (uint32_t)128U;
+    ite = 128U;
   }
   else
   {
-    ite = r % (uint32_t)128U;
+    ite = r % 128U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
   uint64_t *wv1 = tmp_block_state.fst;
   uint64_t *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2b_32_blake2b_update_multi((uint32_t)0U,
+  uint32_t nb = 0U;
+  Hacl_Blake2b_32_blake2b_update_multi(0U,
     wv1,
     hash0,
     FStar_UInt128_uint64_to_uint128(prev_len),
@@ -634,7 +622,7 @@ Hacl_Streaming_Blake2_blake2b_32_no_key_finish(
     FStar_UInt128_uint64_to_uint128(prev_len_last),
     r,
     buf_last);
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, tmp_block_state.snd);
+  Hacl_Blake2b_32_blake2b_finish(64U, dst, tmp_block_state.snd);
 }
 
 /**
diff --git a/src/msvc/Hacl_Streaming_Blake2b_256.c b/src/msvc/Hacl_Streaming_Blake2b_256.c
index bdb5433f..fee698bf 100644
--- a/src/msvc/Hacl_Streaming_Blake2b_256.c
+++ b/src/msvc/Hacl_Streaming_Blake2b_256.c
@@ -31,27 +31,27 @@
 Hacl_Streaming_Blake2b_256_blake2b_256_state
 *Hacl_Streaming_Blake2b_256_blake2b_256_no_key_create_in(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
   Lib_IntVector_Intrinsics_vec256
   *wv =
     (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U);
-  memset(wv, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
+  memset(wv, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
   Lib_IntVector_Intrinsics_vec256
   *b =
     (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U);
-  memset(b, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
+  memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
   Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = { .fst = wv, .snd = b };
   Hacl_Streaming_Blake2b_256_blake2b_256_state
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_Blake2b_256_blake2b_256_state
   *p =
     (Hacl_Streaming_Blake2b_256_blake2b_256_state *)KRML_HOST_MALLOC(sizeof (
         Hacl_Streaming_Blake2b_256_blake2b_256_state
       ));
   p[0U] = s;
-  Hacl_Blake2b_256_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_256_blake2b_init(block_state.snd, 0U, 64U);
   return p;
 }
 
@@ -66,9 +66,9 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_init(
   Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *s;
   uint8_t *buf = scrut.buf;
   Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state;
-  Hacl_Blake2b_256_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Blake2b_256_blake2b_init(block_state.snd, 0U, 64U);
   Hacl_Streaming_Blake2b_256_blake2b_256_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -84,33 +84,33 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
 {
   Hacl_Streaming_Blake2b_256_blake2b_256_state s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)128U;
+    sz = 128U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    sz = (uint32_t)(total_len % (uint64_t)128U);
   }
-  if (len <= (uint32_t)128U - sz)
+  if (len <= 128U - sz)
   {
     Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p;
     Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -125,28 +125,28 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p;
     Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
       Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
+      uint32_t nb = 1U;
+      Hacl_Blake2b_256_blake2b_update_multi(128U,
         wv,
         hash,
         FStar_UInt128_uint64_to_uint128(prevlen),
@@ -154,22 +154,22 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
         nb);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)128U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
+    uint32_t n_blocks = (len - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
     Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
     Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
+    uint32_t nb = data1_len / 128U;
     Hacl_Blake2b_256_blake2b_update_multi(data1_len,
       wv,
       hash,
@@ -190,7 +190,7 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)128U - sz;
+    uint32_t diff = 128U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p;
@@ -198,13 +198,13 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)128U;
+      sz10 = 128U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -223,21 +223,21 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
       Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
+      uint32_t nb = 1U;
+      Hacl_Blake2b_256_blake2b_update_multi(128U,
         wv,
         hash,
         FStar_UInt128_uint64_to_uint128(prevlen),
@@ -245,28 +245,22 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
         nb);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)128U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
+    uint32_t n_blocks = (len - diff - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
     Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
     Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
+    uint32_t nb = data1_len / 128U;
     Hacl_Blake2b_256_blake2b_update_multi(data1_len,
       wv,
       hash,
@@ -302,13 +296,13 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)128U;
+    r = 128U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    r = (uint32_t)(total_len % (uint64_t)128U);
   }
   uint8_t *buf_1 = buf_;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
@@ -316,23 +310,23 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(
   Hacl_Streaming_Blake2b_256_blake2b_256_block_state tmp_block_state = { .fst = wv0, .snd = b };
   Lib_IntVector_Intrinsics_vec256 *src_b = block_state.snd;
   Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 128U == 0U && r > 0U)
   {
-    ite = (uint32_t)128U;
+    ite = 128U;
   }
   else
   {
-    ite = r % (uint32_t)128U;
+    ite = r % 128U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
   Lib_IntVector_Intrinsics_vec256 *wv1 = tmp_block_state.fst;
   Lib_IntVector_Intrinsics_vec256 *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2b_256_blake2b_update_multi((uint32_t)0U,
+  uint32_t nb = 0U;
+  Hacl_Blake2b_256_blake2b_update_multi(0U,
     wv1,
     hash0,
     FStar_UInt128_uint64_to_uint128(prev_len),
@@ -347,7 +341,7 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(
     FStar_UInt128_uint64_to_uint128(prev_len_last),
     r,
     buf_last);
-  Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, tmp_block_state.snd);
+  Hacl_Blake2b_256_blake2b_finish(64U, dst, tmp_block_state.snd);
 }
 
 /**
diff --git a/src/msvc/Hacl_Streaming_Blake2s_128.c b/src/msvc/Hacl_Streaming_Blake2s_128.c
index f97bf5d0..03bc4e13 100644
--- a/src/msvc/Hacl_Streaming_Blake2s_128.c
+++ b/src/msvc/Hacl_Streaming_Blake2s_128.c
@@ -31,27 +31,27 @@
 Hacl_Streaming_Blake2s_128_blake2s_128_state
 *Hacl_Streaming_Blake2s_128_blake2s_128_no_key_create_in(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
   Lib_IntVector_Intrinsics_vec128
   *wv =
     (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U);
-  memset(wv, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(wv, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
   Lib_IntVector_Intrinsics_vec128
   *b =
     (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U);
-  memset(b, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
   Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = { .fst = wv, .snd = b };
   Hacl_Streaming_Blake2s_128_blake2s_128_state
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_Blake2s_128_blake2s_128_state
   *p =
     (Hacl_Streaming_Blake2s_128_blake2s_128_state *)KRML_HOST_MALLOC(sizeof (
         Hacl_Streaming_Blake2s_128_blake2s_128_state
       ));
   p[0U] = s;
-  Hacl_Blake2s_128_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_128_blake2s_init(block_state.snd, 0U, 32U);
   return p;
 }
 
@@ -66,9 +66,9 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_init(
   Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *s;
   uint8_t *buf = scrut.buf;
   Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state;
-  Hacl_Blake2s_128_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Blake2s_128_blake2s_init(block_state.snd, 0U, 32U);
   Hacl_Streaming_Blake2s_128_blake2s_128_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   s[0U] = tmp;
 }
 
@@ -84,33 +84,33 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
 {
   Hacl_Streaming_Blake2s_128_blake2s_128_state s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (len <= 64U - sz)
   {
     Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p;
     Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -125,46 +125,46 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p;
     Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
       Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
+      uint32_t nb = 1U;
+      Hacl_Blake2s_128_blake2s_update_multi(64U, wv, hash, prevlen, buf, nb);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)64U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
     Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
     Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
+    uint32_t nb = data1_len / 64U;
     Hacl_Blake2s_128_blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
@@ -180,7 +180,7 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
+    uint32_t diff = 64U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p;
@@ -188,13 +188,13 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -213,45 +213,39 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       uint64_t prevlen = total_len1 - (uint64_t)sz1;
       Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
       Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
+      uint32_t nb = 1U;
+      Hacl_Blake2s_128_blake2s_update_multi(64U, wv, hash, prevlen, buf, nb);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
     Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
     Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
+    uint32_t nb = data1_len / 64U;
     Hacl_Blake2s_128_blake2s_update_multi(data1_len, wv, hash, total_len1, data11, nb);
     uint8_t *dst = buf;
     memcpy(dst, data21, data2_len * sizeof (uint8_t));
@@ -282,13 +276,13 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish(
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
@@ -296,28 +290,28 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish(
   Hacl_Streaming_Blake2s_128_blake2s_128_block_state tmp_block_state = { .fst = wv0, .snd = b };
   Lib_IntVector_Intrinsics_vec128 *src_b = block_state.snd;
   Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
   Lib_IntVector_Intrinsics_vec128 *wv1 = tmp_block_state.fst;
   Lib_IntVector_Intrinsics_vec128 *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2s_128_blake2s_update_multi((uint32_t)0U, wv1, hash0, prev_len, buf_multi, nb);
+  uint32_t nb = 0U;
+  Hacl_Blake2s_128_blake2s_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   Lib_IntVector_Intrinsics_vec128 *wv = tmp_block_state.fst;
   Lib_IntVector_Intrinsics_vec128 *hash = tmp_block_state.snd;
   Hacl_Blake2s_128_blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last);
-  Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, tmp_block_state.snd);
+  Hacl_Blake2s_128_blake2s_finish(32U, dst, tmp_block_state.snd);
 }
 
 /**
diff --git a/src/msvc/Hacl_Streaming_Poly1305_128.c b/src/msvc/Hacl_Streaming_Poly1305_128.c
index c3f7c19a..e8275b99 100644
--- a/src/msvc/Hacl_Streaming_Poly1305_128.c
+++ b/src/msvc/Hacl_Streaming_Poly1305_128.c
@@ -28,19 +28,18 @@
 Hacl_Streaming_Poly1305_128_poly1305_128_state
 *Hacl_Streaming_Poly1305_128_create_in(uint8_t *k)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
   Lib_IntVector_Intrinsics_vec128
   *r1 =
     (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)25U);
-  memset(r1, 0U, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128));
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 25U);
+  memset(r1, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec128));
   Lib_IntVector_Intrinsics_vec128 *block_state = r1;
-  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
+  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  memcpy(k_, k, 32U * sizeof (uint8_t));
   uint8_t *k_0 = k_;
   Hacl_Streaming_Poly1305_128_poly1305_128_state
-  s =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_0 };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_0 };
   Hacl_Streaming_Poly1305_128_poly1305_128_state
   *p =
     (Hacl_Streaming_Poly1305_128_poly1305_128_state *)KRML_HOST_MALLOC(sizeof (
@@ -59,11 +58,10 @@ Hacl_Streaming_Poly1305_128_init(uint8_t *k, Hacl_Streaming_Poly1305_128_poly130
   uint8_t *buf = scrut.buf;
   Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
   Hacl_Poly1305_128_poly1305_init(block_state, k);
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(k_, k, 32U * sizeof (uint8_t));
   uint8_t *k_1 = k_;
   Hacl_Streaming_Poly1305_128_poly1305_128_state
-  tmp =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_1 };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 };
   s[0U] = tmp;
 }
 
@@ -79,20 +77,20 @@ Hacl_Streaming_Poly1305_128_update(
 {
   Hacl_Streaming_Poly1305_128_poly1305_128_state s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)32U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)32U;
+    sz = 32U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)32U);
+    sz = (uint32_t)(total_len % (uint64_t)32U);
   }
-  if (len <= (uint32_t)32U - sz)
+  if (len <= 32U - sz)
   {
     Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p;
     Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state;
@@ -100,13 +98,13 @@ Hacl_Streaming_Poly1305_128_update(
     uint64_t total_len1 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)32U;
+      sz1 = 32U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)32U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -122,7 +120,7 @@ Hacl_Streaming_Poly1305_128_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p;
     Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state;
@@ -130,29 +128,29 @@ Hacl_Streaming_Poly1305_128_update(
     uint64_t total_len1 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)32U;
+      sz1 = 32U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)32U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Poly1305_128_poly1305_update(block_state1, (uint32_t)32U, buf);
+      Hacl_Poly1305_128_poly1305_update(block_state1, 32U, buf);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)32U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)32U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)32U;
+      ite = 32U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)32U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)32U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)32U;
-    uint32_t data1_len = n_blocks * (uint32_t)32U;
+    uint32_t n_blocks = (len - ite) / 32U;
+    uint32_t data1_len = n_blocks * 32U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
@@ -172,7 +170,7 @@ Hacl_Streaming_Poly1305_128_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)32U - sz;
+    uint32_t diff = 32U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p;
@@ -181,13 +179,13 @@ Hacl_Streaming_Poly1305_128_update(
     uint64_t total_len10 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)32U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)32U;
+      sz10 = 32U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)32U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)32U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -208,35 +206,29 @@ Hacl_Streaming_Poly1305_128_update(
     uint64_t total_len1 = s10.total_len;
     uint8_t *k_10 = s10.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)32U;
+      sz1 = 32U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)32U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Poly1305_128_poly1305_update(block_state1, (uint32_t)32U, buf);
+      Hacl_Poly1305_128_poly1305_update(block_state1, 32U, buf);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)32U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)32U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)32U;
+      ite = 32U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)32U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)32U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)32U;
-    uint32_t data1_len = n_blocks * (uint32_t)32U;
+    uint32_t n_blocks = (len - diff - ite) / 32U;
+    uint32_t data1_len = n_blocks * 32U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
@@ -269,61 +261,51 @@ Hacl_Streaming_Poly1305_128_finish(
   uint64_t total_len = scrut.total_len;
   uint8_t *k_ = scrut.p_key;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)32U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)32U;
+    r = 32U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)32U);
+    r = (uint32_t)(total_len % (uint64_t)32U);
   }
   uint8_t *buf_1 = buf_;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 r1[25U] KRML_POST_ALIGN(16) = { 0U };
   Lib_IntVector_Intrinsics_vec128 *tmp_block_state = r1;
-  memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(tmp_block_state, block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec128));
   uint32_t ite0;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite0 = (uint32_t)16U;
+    ite0 = 16U;
   }
   else
   {
-    ite0 = r % (uint32_t)16U;
+    ite0 = r % 16U;
   }
   uint8_t *buf_last = buf_1 + r - ite0;
   uint8_t *buf_multi = buf_1;
   uint32_t ite;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite = (uint32_t)16U;
+    ite = 16U;
   }
   else
   {
-    ite = r % (uint32_t)16U;
+    ite = r % 16U;
   }
   Hacl_Poly1305_128_poly1305_update(tmp_block_state, r - ite, buf_multi);
   uint32_t ite1;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite1 = (uint32_t)16U;
+    ite1 = 16U;
   }
   else
   {
-    ite1 = r % (uint32_t)16U;
+    ite1 = r % 16U;
   }
-  KRML_HOST_IGNORE(total_len - (uint64_t)ite1);
-  uint32_t ite2;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite2 = (uint32_t)16U;
-  }
-  else
-  {
-    ite2 = r % (uint32_t)16U;
-  }
-  Hacl_Poly1305_128_poly1305_update(tmp_block_state, ite2, buf_last);
+  Hacl_Poly1305_128_poly1305_update(tmp_block_state, ite1, buf_last);
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 tmp[25U] KRML_POST_ALIGN(16) = { 0U };
-  memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(tmp, tmp_block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec128));
   Hacl_Poly1305_128_poly1305_finish(dst, k_, tmp);
 }
 
diff --git a/src/msvc/Hacl_Streaming_Poly1305_256.c b/src/msvc/Hacl_Streaming_Poly1305_256.c
index e56275a4..ff769af9 100644
--- a/src/msvc/Hacl_Streaming_Poly1305_256.c
+++ b/src/msvc/Hacl_Streaming_Poly1305_256.c
@@ -28,19 +28,18 @@
 Hacl_Streaming_Poly1305_256_poly1305_256_state
 *Hacl_Streaming_Poly1305_256_create_in(uint8_t *k)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
   Lib_IntVector_Intrinsics_vec256
   *r1 =
     (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)25U);
-  memset(r1, 0U, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 25U);
+  memset(r1, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
   Lib_IntVector_Intrinsics_vec256 *block_state = r1;
-  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
+  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  memcpy(k_, k, 32U * sizeof (uint8_t));
   uint8_t *k_0 = k_;
   Hacl_Streaming_Poly1305_256_poly1305_256_state
-  s =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_0 };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_0 };
   Hacl_Streaming_Poly1305_256_poly1305_256_state
   *p =
     (Hacl_Streaming_Poly1305_256_poly1305_256_state *)KRML_HOST_MALLOC(sizeof (
@@ -59,11 +58,10 @@ Hacl_Streaming_Poly1305_256_init(uint8_t *k, Hacl_Streaming_Poly1305_256_poly130
   uint8_t *buf = scrut.buf;
   Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
   Hacl_Poly1305_256_poly1305_init(block_state, k);
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(k_, k, 32U * sizeof (uint8_t));
   uint8_t *k_1 = k_;
   Hacl_Streaming_Poly1305_256_poly1305_256_state
-  tmp =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_1 };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 };
   s[0U] = tmp;
 }
 
@@ -79,20 +77,20 @@ Hacl_Streaming_Poly1305_256_update(
 {
   Hacl_Streaming_Poly1305_256_poly1305_256_state s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (len <= 64U - sz)
   {
     Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p;
     Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state;
@@ -100,13 +98,13 @@ Hacl_Streaming_Poly1305_256_update(
     uint64_t total_len1 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -122,7 +120,7 @@ Hacl_Streaming_Poly1305_256_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p;
     Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state;
@@ -130,29 +128,29 @@ Hacl_Streaming_Poly1305_256_update(
     uint64_t total_len1 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Poly1305_256_poly1305_update(block_state1, (uint32_t)64U, buf);
+      Hacl_Poly1305_256_poly1305_update(block_state1, 64U, buf);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)64U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
@@ -172,7 +170,7 @@ Hacl_Streaming_Poly1305_256_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
+    uint32_t diff = 64U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p;
@@ -181,13 +179,13 @@ Hacl_Streaming_Poly1305_256_update(
     uint64_t total_len10 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -208,35 +206,29 @@ Hacl_Streaming_Poly1305_256_update(
     uint64_t total_len1 = s10.total_len;
     uint8_t *k_10 = s10.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Poly1305_256_poly1305_update(block_state1, (uint32_t)64U, buf);
+      Hacl_Poly1305_256_poly1305_update(block_state1, 64U, buf);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
+    uint32_t n_blocks = (len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
@@ -269,61 +261,51 @@ Hacl_Streaming_Poly1305_256_finish(
   uint64_t total_len = scrut.total_len;
   uint8_t *k_ = scrut.p_key;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 r1[25U] KRML_POST_ALIGN(32) = { 0U };
   Lib_IntVector_Intrinsics_vec256 *tmp_block_state = r1;
-  memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(tmp_block_state, block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint32_t ite0;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite0 = (uint32_t)16U;
+    ite0 = 16U;
   }
   else
   {
-    ite0 = r % (uint32_t)16U;
+    ite0 = r % 16U;
   }
   uint8_t *buf_last = buf_1 + r - ite0;
   uint8_t *buf_multi = buf_1;
   uint32_t ite;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite = (uint32_t)16U;
+    ite = 16U;
   }
   else
   {
-    ite = r % (uint32_t)16U;
+    ite = r % 16U;
   }
   Hacl_Poly1305_256_poly1305_update(tmp_block_state, r - ite, buf_multi);
   uint32_t ite1;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite1 = (uint32_t)16U;
+    ite1 = 16U;
   }
   else
   {
-    ite1 = r % (uint32_t)16U;
+    ite1 = r % 16U;
   }
-  KRML_HOST_IGNORE(total_len - (uint64_t)ite1);
-  uint32_t ite2;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite2 = (uint32_t)16U;
-  }
-  else
-  {
-    ite2 = r % (uint32_t)16U;
-  }
-  Hacl_Poly1305_256_poly1305_update(tmp_block_state, ite2, buf_last);
+  Hacl_Poly1305_256_poly1305_update(tmp_block_state, ite1, buf_last);
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 tmp[25U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(tmp, tmp_block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
   Hacl_Poly1305_256_poly1305_finish(dst, k_, tmp);
 }
 
diff --git a/src/msvc/Hacl_Streaming_Poly1305_32.c b/src/msvc/Hacl_Streaming_Poly1305_32.c
index 249a622f..b1eb12b2 100644
--- a/src/msvc/Hacl_Streaming_Poly1305_32.c
+++ b/src/msvc/Hacl_Streaming_Poly1305_32.c
@@ -27,15 +27,14 @@
 
 Hacl_Streaming_Poly1305_32_poly1305_32_state *Hacl_Streaming_Poly1305_32_create_in(uint8_t *k)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint8_t));
-  uint64_t *r1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t));
+  uint64_t *r1 = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
   uint64_t *block_state = r1;
-  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
+  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  memcpy(k_, k, 32U * sizeof (uint8_t));
   uint8_t *k_0 = k_;
   Hacl_Streaming_Poly1305_32_poly1305_32_state
-  s =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_0 };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_0 };
   Hacl_Streaming_Poly1305_32_poly1305_32_state
   *p =
     (Hacl_Streaming_Poly1305_32_poly1305_32_state *)KRML_HOST_MALLOC(sizeof (
@@ -54,11 +53,10 @@ Hacl_Streaming_Poly1305_32_init(uint8_t *k, Hacl_Streaming_Poly1305_32_poly1305_
   uint8_t *buf = scrut.buf;
   uint64_t *block_state = scrut.block_state;
   Hacl_Poly1305_32_poly1305_init(block_state, k);
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(k_, k, 32U * sizeof (uint8_t));
   uint8_t *k_1 = k_;
   Hacl_Streaming_Poly1305_32_poly1305_32_state
-  tmp =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_1 };
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 };
   s[0U] = tmp;
 }
 
@@ -74,20 +72,20 @@ Hacl_Streaming_Poly1305_32_update(
 {
   Hacl_Streaming_Poly1305_32_poly1305_32_state s = *p;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffU - total_len)
+  if ((uint64_t)len > 0xffffffffULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)16U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)16U;
+    sz = 16U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)16U);
+    sz = (uint32_t)(total_len % (uint64_t)16U);
   }
-  if (len <= (uint32_t)16U - sz)
+  if (len <= 16U - sz)
   {
     Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p;
     uint64_t *block_state1 = s1.block_state;
@@ -95,13 +93,13 @@ Hacl_Streaming_Poly1305_32_update(
     uint64_t total_len1 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)16U;
+      sz1 = 16U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)16U);
     }
     uint8_t *buf2 = buf + sz1;
     memcpy(buf2, data, len * sizeof (uint8_t));
@@ -117,7 +115,7 @@ Hacl_Streaming_Poly1305_32_update(
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
     Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p;
     uint64_t *block_state1 = s1.block_state;
@@ -125,29 +123,29 @@ Hacl_Streaming_Poly1305_32_update(
     uint64_t total_len1 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)16U;
+      sz1 = 16U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)16U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Poly1305_32_poly1305_update(block_state1, (uint32_t)16U, buf);
+      Hacl_Poly1305_32_poly1305_update(block_state1, 16U, buf);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)16U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)len % (uint64_t)16U == 0ULL && (uint64_t)len > 0ULL)
     {
-      ite = (uint32_t)16U;
+      ite = 16U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)16U);
+      ite = (uint32_t)((uint64_t)len % (uint64_t)16U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)16U;
-    uint32_t data1_len = n_blocks * (uint32_t)16U;
+    uint32_t n_blocks = (len - ite) / 16U;
+    uint32_t data1_len = n_blocks * 16U;
     uint32_t data2_len = len - data1_len;
     uint8_t *data1 = data;
     uint8_t *data2 = data + data1_len;
@@ -167,7 +165,7 @@ Hacl_Streaming_Poly1305_32_update(
   }
   else
   {
-    uint32_t diff = (uint32_t)16U - sz;
+    uint32_t diff = 16U - sz;
     uint8_t *data1 = data;
     uint8_t *data2 = data + diff;
     Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p;
@@ -176,13 +174,13 @@ Hacl_Streaming_Poly1305_32_update(
     uint64_t total_len10 = s1.total_len;
     uint8_t *k_1 = s1.p_key;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)16U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)16U;
+      sz10 = 16U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)16U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)16U);
     }
     uint8_t *buf2 = buf0 + sz10;
     memcpy(buf2, data1, diff * sizeof (uint8_t));
@@ -203,35 +201,29 @@ Hacl_Streaming_Poly1305_32_update(
     uint64_t total_len1 = s10.total_len;
     uint8_t *k_10 = s10.p_key;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)16U;
+      sz1 = 16U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)16U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Poly1305_32_poly1305_update(block_state1, (uint32_t)16U, buf);
+      Hacl_Poly1305_32_poly1305_update(block_state1, 16U, buf);
     }
     uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)16U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    if ((uint64_t)(len - diff) % (uint64_t)16U == 0ULL && (uint64_t)(len - diff) > 0ULL)
     {
-      ite = (uint32_t)16U;
+      ite = 16U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)16U);
+      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)16U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)16U;
-    uint32_t data1_len = n_blocks * (uint32_t)16U;
+    uint32_t n_blocks = (len - diff - ite) / 16U;
+    uint32_t data1_len = n_blocks * 16U;
     uint32_t data2_len = len - diff - data1_len;
     uint8_t *data11 = data2;
     uint8_t *data21 = data2 + data1_len;
@@ -264,33 +256,33 @@ Hacl_Streaming_Poly1305_32_finish(
   uint64_t total_len = scrut.total_len;
   uint8_t *k_ = scrut.p_key;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)16U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)16U;
+    r = 16U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)16U);
+    r = (uint32_t)(total_len % (uint64_t)16U);
   }
   uint8_t *buf_1 = buf_;
   uint64_t r1[25U] = { 0U };
   uint64_t *tmp_block_state = r1;
-  memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (uint64_t));
+  memcpy(tmp_block_state, block_state, 25U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 16U == 0U && r > 0U)
   {
-    ite = (uint32_t)16U;
+    ite = 16U;
   }
   else
   {
-    ite = r % (uint32_t)16U;
+    ite = r % 16U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_Poly1305_32_poly1305_update(tmp_block_state, (uint32_t)0U, buf_multi);
+  Hacl_Poly1305_32_poly1305_update(tmp_block_state, 0U, buf_multi);
   Hacl_Poly1305_32_poly1305_update(tmp_block_state, r, buf_last);
   uint64_t tmp[25U] = { 0U };
-  memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (uint64_t));
+  memcpy(tmp, tmp_block_state, 25U * sizeof (uint64_t));
   Hacl_Poly1305_32_poly1305_finish(dst, k_, tmp);
 }
 
diff --git a/src/wasm/EverCrypt_Hash.wasm b/src/wasm/EverCrypt_Hash.wasm
index 8fdc7b27..7264f0b8 100644
Binary files a/src/wasm/EverCrypt_Hash.wasm and b/src/wasm/EverCrypt_Hash.wasm differ
diff --git a/src/wasm/Hacl_Bignum.wasm b/src/wasm/Hacl_Bignum.wasm
index b9c99c89..6e090b50 100644
Binary files a/src/wasm/Hacl_Bignum.wasm and b/src/wasm/Hacl_Bignum.wasm differ
diff --git a/src/wasm/Hacl_Bignum256.wasm b/src/wasm/Hacl_Bignum256.wasm
index 24cf0406..b28b276b 100644
Binary files a/src/wasm/Hacl_Bignum256.wasm and b/src/wasm/Hacl_Bignum256.wasm differ
diff --git a/src/wasm/Hacl_Bignum256_32.wasm b/src/wasm/Hacl_Bignum256_32.wasm
index d949c878..05db6caa 100644
Binary files a/src/wasm/Hacl_Bignum256_32.wasm and b/src/wasm/Hacl_Bignum256_32.wasm differ
diff --git a/src/wasm/Hacl_Bignum32.wasm b/src/wasm/Hacl_Bignum32.wasm
index fa107b62..c2102b81 100644
Binary files a/src/wasm/Hacl_Bignum32.wasm and b/src/wasm/Hacl_Bignum32.wasm differ
diff --git a/src/wasm/Hacl_Bignum4096.wasm b/src/wasm/Hacl_Bignum4096.wasm
index c1ced14d..6cc1bf47 100644
Binary files a/src/wasm/Hacl_Bignum4096.wasm and b/src/wasm/Hacl_Bignum4096.wasm differ
diff --git a/src/wasm/Hacl_Bignum4096_32.wasm b/src/wasm/Hacl_Bignum4096_32.wasm
index a088be23..35bcb037 100644
Binary files a/src/wasm/Hacl_Bignum4096_32.wasm and b/src/wasm/Hacl_Bignum4096_32.wasm differ
diff --git a/src/wasm/Hacl_Bignum64.wasm b/src/wasm/Hacl_Bignum64.wasm
index edc590b1..d7db1531 100644
Binary files a/src/wasm/Hacl_Bignum64.wasm and b/src/wasm/Hacl_Bignum64.wasm differ
diff --git a/src/wasm/Hacl_Ed25519.wasm b/src/wasm/Hacl_Ed25519.wasm
index 57ca4d36..bd073fd6 100644
Binary files a/src/wasm/Hacl_Ed25519.wasm and b/src/wasm/Hacl_Ed25519.wasm differ
diff --git a/src/wasm/Hacl_Streaming_Blake2.wasm b/src/wasm/Hacl_Streaming_Blake2.wasm
index ff2f0e69..80055201 100644
Binary files a/src/wasm/Hacl_Streaming_Blake2.wasm and b/src/wasm/Hacl_Streaming_Blake2.wasm differ
diff --git a/src/wasm/Hacl_Streaming_Blake2b_256.wasm b/src/wasm/Hacl_Streaming_Blake2b_256.wasm
index 36e0d792..41c0bf02 100644
Binary files a/src/wasm/Hacl_Streaming_Blake2b_256.wasm and b/src/wasm/Hacl_Streaming_Blake2b_256.wasm differ
diff --git a/src/wasm/Hacl_Streaming_Blake2s_128.wasm b/src/wasm/Hacl_Streaming_Blake2s_128.wasm
index 0fb08566..8292fdfe 100644
Binary files a/src/wasm/Hacl_Streaming_Blake2s_128.wasm and b/src/wasm/Hacl_Streaming_Blake2s_128.wasm differ
diff --git a/src/wasm/INFO.txt b/src/wasm/INFO.txt
index 60cb7b00..bfdf57fb 100644
--- a/src/wasm/INFO.txt
+++ b/src/wasm/INFO.txt
@@ -1,4 +1,4 @@
 This code was generated with the following toolchain.
-F* version: bc622701c668f6b4092760879372968265d4a4e1
-Karamel version: 7cffd27cfefbd220e986e561e8d350f043609f76
+F* version: 0b0b27995eb98ad7c250c759ea4e7c579b7c4c85
+Karamel version: a7be2a7c43eca637ceb57fe8f3ffd16fc6627ebd
 Vale version: 0.3.19